rippled  0.24.0-rc1
Reference implementation of the Ripple Protocol
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
rocksdb Namespace Reference

Namespaces

 anon
 
 crc32c
 
 log
 
 port
 
 stl_wrappers
 
 test
 

Classes

class  CorruptionTest
 
class  RandomGenerator
 
class  Stats
 
struct  SharedState
 
struct  ThreadState
 
class  Duration
 
class  Benchmark
 
struct  CompareLogByPointer
 
class  DBImpl
 
class  DBImplReadOnly
 
class  DBStatistics
 
class  SpecialEnv
 
class  DBTest
 
class  KeepFilter
 
class  DeleteFilter
 
class  ChangeFilter
 
class  KeepFilterFactory
 
class  DeleteFilterFactory
 
class  ChangeFilterFactory
 
class  ModelDB
 
struct  ParsedInternalKey
 
class  InternalKeyComparator
 
class  InternalFilterPolicy
 
class  InternalKey
 
class  LookupKey
 
class  FormatTest
 
class  DeleteFileTest
 
class  FileNameTest
 
class  MemTableIterator
 
class  MemTable
 
class  MemTableList
 
class  MergeHelper
 
class  PerfContextTest
 
class  PrefixFilterIterator
 
struct  TestKey
 
class  TestKeyComparator
 
class  PrefixTest
 
class  SimpleTableReader
 
class  SimpleTableIterator
 
class  SimpleTableBuilder
 
class  SimpleTableFactory
 
class  SimpleTableDBTest
 
class  SkipList
 
struct  TestComparator
 
class  SkipTest
 
class  ConcurrentTest
 
class  TestState
 
class  SnapshotImpl
 
class  SnapshotList
 
class  TableCache
 
struct  InternalKeyTablePropertiesNames
 
class  InternalKeyPropertiesCollector
 
class  UserKeyTablePropertiesCollector
 
class  TablePropertiesTest
 
class  FakeWritableFile
 
class  FakeRandomeAccessFile
 
class  DumbLogger
 
class  RegularKeysStartWithA
 
struct  LogReporter
 
class  LogFileImpl
 
class  TransactionLogIteratorImpl
 
struct  FileMetaData
 
class  VersionEdit
 
class  VersionEditTest
 
class  Version
 
class  VersionSet
 
class  Compaction
 
class  FindFileTest
 
class  WriteBatchInternal
 
class  WriteBatchTest
 
class  HdfsEnv
 
class  MemEnvTest
 
class  Arena
 
class  Cache
 
class  CompactionFilter
 
class  CompactionFilterFactory
 
class  DefaultCompactionFilterFactory
 
class  Comparator
 
struct  LiveFileMetaData
 
class  Snapshot
 
struct  Range
 
class  DB
 
struct  EnvOptions
 
class  Env
 
class  SequentialFile
 
class  RandomAccessFile
 
class  WritableFile
 
class  RandomRWFile
 
class  Logger
 
class  FileLock
 
class  EnvWrapper
 
class  FilterPolicy
 
class  FlushBlockPolicy
 
class  FlushBlockPolicyFactory
 
class  FlushBlockBySizePolicyFactory
 
class  Iterator
 
class  LDBTool
 
class  MemTableRep
 
class  MemTableRepFactory
 
class  VectorRepFactory
 
class  SkipListFactory
 
class  TransformRepFactory
 
class  UnsortedRepFactory
 
class  PrefixHashRepFactory
 
class  MergeOperator
 
class  AssociativeMergeOperator
 
struct  CompressionOptions
 
struct  Options
 
struct  ReadOptions
 
struct  WriteOptions
 
struct  FlushOptions
 
struct  PerfContext
 
class  Slice
 
struct  SliceParts
 
class  SliceTransform
 
struct  HistogramData
 
class  Histogram
 
class  Ticker
 
class  Statistics
 
class  Status
 
class  TableBuilder
 
class  TableReader
 
class  TableFactory
 
struct  TableProperties
 
class  TablePropertiesCollector
 
class  LogFile
 
struct  BatchResult
 
class  TransactionLogIterator
 
class  CompactionOptionsUniversal
 
class  WriteBatch
 
class  StackableDB
 
class  UtilityDB
 
class  Block
 
class  BlockBasedTableBuilder
 
class  BlockBasedTableFactory
 
class  BlockBasedTable
 
struct  BlockBasedTablePropertiesNames
 
class  BlockBuilder
 
class  BlockTest
 
class  FilterBlockBuilder
 
class  FilterBlockReader
 
class  TestHashFilter
 
class  FilterBlockTest
 
class  FlushBlockBySizePolicy
 
class  BlockHandle
 
class  Footer
 
struct  BlockContents
 
class  MaxIteratorComparator
 
class  MinIteratorComparator
 
class  IteratorWrapper
 
class  StringSink
 
class  StringSource
 
class  Constructor
 
class  BlockConstructor
 
class  BlockBasedTableConstructor
 
class  KeyConvertingIterator
 
class  MemTableConstructor
 
class  DBConstructor
 
struct  TestArgs
 
class  Harness
 
class  TableTest
 
class  BlockCacheProperties
 
class  MemTableTest
 
class  StressTest
 
class  ReduceLevelTest
 
class  DBClientProxy
 
class  SstFileReader
 
class  ArenaImpl
 
class  ArenaImplTest
 
class  AutoRollLogger
 
class  AutoRollLoggerTest
 
class  BitSet
 
struct  BlobChunk
 
struct  Blob
 
class  FreeList
 
class  BlobStore
 
class  BlobStoreTest
 
class  BloomTest
 
class  CacheTest
 
class  Value
 
class  Coding
 
class  EnvPosixTest
 
struct  State
 
class  LockTest
 
class  HashSkipListRepFactory
 
class  HistogramBucketMapper
 
class  HistogramImpl
 
class  HistogramTest
 
class  InMemoryHandler
 
class  LDBCommand
 
class  CompactorCommand
 
class  DBDumperCommand
 
class  InternalDumpCommand
 
class  DBLoaderCommand
 
class  ManifestDumpCommand
 
class  ReduceDBLevelsCommand
 
class  ChangeCompactionStyleCommand
 
class  WALDumperCommand
 
class  GetCommand
 
class  ApproxSizeCommand
 
class  BatchPutCommand
 
class  ScanCommand
 
class  DeleteCommand
 
class  PutCommand
 
class  DBQuerierCommand
 
class  LDBCommandExecuteResult
 
class  LDBCommandRunner
 
class  MutexLock
 
class  ReadLock
 
class  WriteLock
 
class  PosixLogger
 
class  Random
 
class  Random64
 
class  StatsLogger
 
class  StopWatch
 
class  StopWatchNano
 
class  StringAppendOperator
 
class  StringAppendTESTOperator
 
class  StringLists
 
class  StringAppendOperatorTest
 
class  MergeOperators
 
class  RedisListException
 
class  RedisListIterator
 
class  RedisLists
 
class  RedisListsTest
 
class  DBWithTTL
 
class  TtlIterator
 
class  TtlCompactionFilter
 
class  TtlCompactionFilterFactory
 
class  TtlMergeOperator
 
class  TtlTest
 

Typedefs

typedef uint64_t Key
 
typedef std::vector
< std::unique_ptr< LogFile > > 
VectorLogPtr
 
typedef uint64_t SequenceNumber
 
typedef std::priority_queue
< IteratorWrapper
*, std::vector
< IteratorWrapper * >
, MaxIteratorComparator > 
MaxIterHeap
 
typedef std::priority_queue
< IteratorWrapper
*, std::vector
< IteratorWrapper * >
, MinIteratorComparator > 
MinIterHeap
 
typedef std::map< std::string,
std::string, anon::STLLessThan > 
KVMap
 

Enumerations

enum  ValueType { kTypeDeletion = 0x0, kTypeValue = 0x1, kTypeMerge = 0x2, kTypeLogData = 0x3 }
 
enum  FileType {
  kLogFile, kDBLockFile, kTableFile, kDescriptorFile,
  kCurrentFile, kTempFile, kInfoLogFile, kMetaDatabase,
  kIdentityFile
}
 
enum  Tag {
  kComparator = 1, kLogNumber = 2, kNextFileNumber = 3, kLastSequence = 4,
  kCompactPointer = 5, kDeletedFile = 6, kNewFile = 7, kPrevLogNumber = 9,
  kNewFile2 = 100
}
 
enum  CompressionType : char { kNoCompression = 0x0, kSnappyCompression = 0x1, kZlibCompression = 0x2, kBZip2Compression = 0x3 }
 
enum  CompactionStyle : char { kCompactionStyleLevel = 0x0, kCompactionStyleUniversal = 0x1 }
 
enum  ReadTier { kReadAllTier = 0x0, kBlockCacheTier = 0x1 }
 
enum  PerfLevel { kDisable = 0, kEnableCount = 1, kEnableTime = 2 }
 
enum  Tickers {
  BLOCK_CACHE_MISS, BLOCK_CACHE_HIT, BLOCK_CACHE_ADD, BLOCK_CACHE_INDEX_MISS,
  BLOCK_CACHE_INDEX_HIT, BLOCK_CACHE_FILTER_MISS, BLOCK_CACHE_FILTER_HIT, BLOCK_CACHE_DATA_MISS,
  BLOCK_CACHE_DATA_HIT, BLOOM_FILTER_USEFUL, COMPACTION_KEY_DROP_NEWER_ENTRY, COMPACTION_KEY_DROP_OBSOLETE,
  COMPACTION_KEY_DROP_USER, NUMBER_KEYS_WRITTEN, NUMBER_KEYS_READ, NUMBER_KEYS_UPDATED,
  BYTES_WRITTEN, BYTES_READ, NO_FILE_CLOSES, NO_FILE_OPENS,
  NO_FILE_ERRORS, STALL_L0_SLOWDOWN_MICROS, STALL_MEMTABLE_COMPACTION_MICROS, STALL_L0_NUM_FILES_MICROS,
  RATE_LIMIT_DELAY_MILLIS, NO_ITERATORS, NUMBER_MULTIGET_CALLS, NUMBER_MULTIGET_KEYS_READ,
  NUMBER_MULTIGET_BYTES_READ, NUMBER_FILTERED_DELETES, NUMBER_MERGE_FAILURES, SEQUENCE_NUMBER,
  BLOOM_FILTER_PREFIX_CHECKED, BLOOM_FILTER_PREFIX_USEFUL, NUMBER_OF_RESEEKS_IN_ITERATION, GET_UPDATES_SINCE_CALLS,
  BLOCK_CACHE_COMPRESSED_MISS, BLOCK_CACHE_COMPRESSED_HIT, TICKER_ENUM_MAX
}
 
enum  Histograms {
  DB_GET, DB_WRITE, COMPACTION_TIME, TABLE_SYNC_MICROS,
  COMPACTION_OUTFILE_SYNC_MICROS, WAL_FILE_SYNC_MICROS, MANIFEST_FILE_SYNC_MICROS, TABLE_OPEN_IO_MICROS,
  DB_MULTIGET, READ_BLOCK_COMPACTION_MICROS, READ_BLOCK_GET_MICROS, WRITE_RAW_BLOCK_MICROS,
  STALL_L0_SLOWDOWN_COUNT, STALL_MEMTABLE_COMPACTION_COUNT, STALL_L0_NUM_FILES_COUNT, HARD_RATE_LIMIT_DELAY_COUNT,
  SOFT_RATE_LIMIT_DELAY_COUNT, NUM_FILES_IN_SINGLE_COMPACTION, HISTOGRAM_ENUM_MAX
}
 
enum  WalFileType { kArchivedLogFile = 0, kAliveLogFile = 1 }
 
enum  CompactionStopStyle { kCompactionStopStyleSimilarSize, kCompactionStopStyleTotalSize }
 
enum  TestType { TABLE_TEST, BLOCK_TEST, MEMTABLE_TEST, DB_TEST }
 

Functions

TableBuilder * GetTableBuilder (const Options &options, WritableFile *file, CompressionType compression_type)
 
Status BuildTable (const std::string &dbname, Env *env, const Options &options, const EnvOptions &soptions, TableCache *table_cache, Iterator *iter, FileMetaData *meta, const Comparator *user_comparator, const SequenceNumber newest_snapshot, const SequenceNumber earliest_seqno_in_memtable, const bool enable_compression)
 
 TEST (CorruptionTest, Recovery)
 
 TEST (CorruptionTest, RecoverWriteError)
 
 TEST (CorruptionTest, NewFileErrorDuringWrite)
 
 TEST (CorruptionTest, TableFile)
 
 TEST (CorruptionTest, TableFileIndexData)
 
 TEST (CorruptionTest, MissingDescriptor)
 
 TEST (CorruptionTest, SequenceNumberRecovery)
 
 TEST (CorruptionTest, CorruptedDescriptor)
 
 TEST (CorruptionTest, CompactionInputError)
 
 TEST (CorruptionTest, CompactionInputErrorParanoid)
 
 TEST (CorruptionTest, UnrelatedKeys)
 
static void AppendWithSpace (std::string *str, Slice msg)
 
void dumpLeveldbBuildVersion (Logger *log)
 
template<class T , class V >
static void ClipToRange (T *ptr, V minvalue, V maxvalue)
 
Options SanitizeOptions (const std::string &dbname, const InternalKeyComparator *icmp, const InternalFilterPolicy *ipolicy, const Options &src)
 
CompressionType GetCompressionType (const Options &options, int level, const bool enable_compression)
 
Status DestroyDB (const std::string &dbname, const Options &options)
 
Iterator * NewDBIterator (const std::string *dbname, Env *env, const Options &options, const Comparator *user_key_comparator, Iterator *internal_iter, const SequenceNumber &sequence)
 
std::shared_ptr< Statistics > CreateDBStatistics ()
 
static bool SnappyCompressionSupported (const CompressionOptions &options)
 
static bool ZlibCompressionSupported (const CompressionOptions &options)
 
static bool BZip2CompressionSupported (const CompressionOptions &options)
 
static std::string RandomString (Random *rnd, int len)
 
static std::string Key (int i)
 
 TEST (DBTest, Empty)
 
 TEST (DBTest, ReadWrite)
 
 TEST (DBTest, IndexAndFilterBlocksOfNewTableAddedToCache)
 
 TEST (DBTest, LevelLimitReopen)
 
 TEST (DBTest, Preallocation)
 
 TEST (DBTest, PutDeleteGet)
 
 TEST (DBTest, GetFromImmutableLayer)
 
 TEST (DBTest, GetFromVersions)
 
 TEST (DBTest, GetSnapshot)
 
 TEST (DBTest, GetLevel0Ordering)
 
 TEST (DBTest, GetOrderedByLevels)
 
 TEST (DBTest, GetPicksCorrectFile)
 
 TEST (DBTest, GetEncountersEmptyLevel)
 
 TEST (DBTest, KeyMayExist)
 
 TEST (DBTest, NonBlockingIteration)
 
 TEST (DBTest, FilterDeletes)
 
 TEST (DBTest, IterEmpty)
 
 TEST (DBTest, IterSingle)
 
 TEST (DBTest, IterMulti)
 
 TEST (DBTest, IterReseek)
 
 TEST (DBTest, IterSmallAndLargeMix)
 
 TEST (DBTest, IterMultiWithDelete)
 
 TEST (DBTest, IterPrevMaxSkip)
 
 TEST (DBTest, IterWithSnapshot)
 
 TEST (DBTest, Recover)
 
 TEST (DBTest, RollLog)
 
 TEST (DBTest, WAL)
 
 TEST (DBTest, CheckLock)
 
 TEST (DBTest, FlushMultipleMemtable)
 
 TEST (DBTest, NumImmutableMemTable)
 
 TEST (DBTest, FLUSH)
 
 TEST (DBTest, RecoveryWithEmptyLog)
 
 TEST (DBTest, RecoverDuringMemtableCompaction)
 
 TEST (DBTest, MinorCompactionsHappen)
 
 TEST (DBTest, ManifestRollOver)
 
 TEST (DBTest, IdentityAcrossRestarts)
 
 TEST (DBTest, RecoverWithLargeLog)
 
 TEST (DBTest, CompactionsGenerateMultipleFiles)
 
 TEST (DBTest, CompactionTrigger)
 
 TEST (DBTest, UniversalCompactionTrigger)
 
 TEST (DBTest, UniversalCompactionSizeAmplification)
 
 TEST (DBTest, UniversalCompactionOptions)
 
 TEST (DBTest, ConvertCompactionStyle)
 
void MinLevelHelper (DBTest *self, Options &options)
 
bool MinLevelToCompress (CompressionType &type, Options &options, int wbits, int lev, int strategy)
 
 TEST (DBTest, MinLevelToCompress1)
 
 TEST (DBTest, MinLevelToCompress2)
 
 TEST (DBTest, RepeatedWritesToSameKey)
 
 TEST (DBTest, InPlaceUpdate)
 
 TEST (DBTest, CompactionFilter)
 
 TEST (DBTest, CompactionFilterWithValueChange)
 
 TEST (DBTest, SparseMerge)
 
static bool Between (uint64_t val, uint64_t low, uint64_t high)
 
 TEST (DBTest, ApproximateSizes)
 
 TEST (DBTest, ApproximateSizes_MixOfSmallAndLarge)
 
 TEST (DBTest, IteratorPinsRef)
 
 TEST (DBTest, Snapshot)
 
 TEST (DBTest, HiddenValuesAreRemoved)
 
 TEST (DBTest, CompactBetweenSnapshots)
 
 TEST (DBTest, DeletionMarkers1)
 
 TEST (DBTest, DeletionMarkers2)
 
 TEST (DBTest, OverlapInLevel0)
 
 TEST (DBTest, L0_CompactionBug_Issue44_a)
 
 TEST (DBTest, L0_CompactionBug_Issue44_b)
 
 TEST (DBTest, ComparatorCheck)
 
 TEST (DBTest, CustomComparator)
 
 TEST (DBTest, ManualCompaction)
 
 TEST (DBTest, DBOpen_Options)
 
 TEST (DBTest, DBOpen_Change_NumLevels)
 
 TEST (DBTest, DestroyDBMetaDatabase)
 
 TEST (DBTest, NoSpace)
 
 TEST (DBTest, NonWritableFileSystem)
 
 TEST (DBTest, ManifestWriteError)
 
 TEST (DBTest, PutFailsParanoid)
 
 TEST (DBTest, FilesDeletedAfterCompaction)
 
 TEST (DBTest, BloomFilter)
 
 TEST (DBTest, SnapshotFiles)
 
 TEST (DBTest, CompactOnFlush)
 
std::vector< std::uint64_t > ListLogFiles (Env *env, const std::string &path)
 
 TEST (DBTest, WALArchivalTtl)
 
uint64_t GetLogDirSize (std::string dir_path, SpecialEnv *env)
 
 TEST (DBTest, WALArchivalSizeLimit)
 
SequenceNumber ReadRecords (std::unique_ptr< TransactionLogIterator > &iter, int &count)
 
void ExpectRecords (const int expected_no_records, std::unique_ptr< TransactionLogIterator > &iter)
 
 TEST (DBTest, TransactionLogIterator)
 
 TEST (DBTest, TransactionLogIteratorMoveOverZeroFiles)
 
 TEST (DBTest, TransactionLogIteratorJustEmptyFile)
 
 TEST (DBTest, TransactionLogIteratorCheckAfterRestart)
 
 TEST (DBTest, TransactionLogIteratorCorruptedLog)
 
 TEST (DBTest, TransactionLogIteratorBatchOperations)
 
 TEST (DBTest, TransactionLogIteratorBlobs)
 
 TEST (DBTest, ReadCompaction)
 
 TEST (DBTest, MultiThreaded)
 
static std::string RandomKey (Random *rnd, int minimum=0)
 
static bool CompareIterators (int step, DB *model, DB *db, const Snapshot *model_snap, const Snapshot *db_snap)
 
 TEST (DBTest, Randomized)
 
 TEST (DBTest, MultiGetSimple)
 
 TEST (DBTest, MultiGetEmpty)
 
void PrefixScanInit (DBTest *dbtest)
 
 TEST (DBTest, PrefixScan)
 
std::string MakeKey (unsigned int num)
 
void BM_LogAndApply (int iters, int num_base_files)
 
static uint64_t PackSequenceAndType (uint64_t seq, ValueType t)
 
void AppendInternalKey (std::string *result, const ParsedInternalKey &key)
 
size_t InternalKeyEncodingLength (const ParsedInternalKey &key)
 
bool ParseInternalKey (const Slice &internal_key, ParsedInternalKey *result)
 
Slice ExtractUserKey (const Slice &internal_key)
 
ValueType ExtractValueType (const Slice &internal_key)
 
void UpdateInternalKey (char *internal_key, const size_t internal_key_size, uint64_t seq, ValueType t)
 
uint64_t GetInternalKeySeqno (const Slice &internal_key)
 
static std::string IKey (const std::string &user_key, uint64_t seq, ValueType vt)
 
static std::string Shorten (const std::string &s, const std::string &l)
 
static std::string ShortSuccessor (const std::string &s)
 
static void TestKey (const std::string &key, uint64_t seq, ValueType vt)
 
 TEST (FormatTest, InternalKey_EncodeDecode)
 
 TEST (FormatTest, InternalKeyShortSeparator)
 
 TEST (FormatTest, InternalKeyShortestSuccessor)
 
 TEST (DeleteFileTest, AddKeysAndQueryLevels)
 
 TEST (DeleteFileTest, PurgeObsoleteFilesTest)
 
 TEST (DeleteFileTest, DeleteFileWithIterator)
 
 TEST (DeleteFileTest, DeleteLogFiles)
 
static int FlattenPath (const std::string &path, char *dest, int len)
 
Status WriteStringToFileSync (Env *env, const Slice &data, const std::string &fname)
 
static std::string MakeFileName (const std::string &name, uint64_t number, const char *suffix)
 
std::string LogFileName (const std::string &name, uint64_t number)
 
std::string ArchivalDirectory (const std::string &dir)
 
std::string ArchivedLogFileName (const std::string &name, uint64_t number)
 
std::string TableFileName (const std::string &name, uint64_t number)
 
std::string DescriptorFileName (const std::string &dbname, uint64_t number)
 
std::string CurrentFileName (const std::string &dbname)
 
std::string LockFileName (const std::string &dbname)
 
std::string TempFileName (const std::string &dbname, uint64_t number)
 
std::string InfoLogFileName (const std::string &dbname, const std::string &db_path, const std::string &log_dir)
 
std::string OldInfoLogFileName (const std::string &dbname, uint64_t ts, const std::string &db_path, const std::string &log_dir)
 
std::string MetaDatabaseName (const std::string &dbname, uint64_t number)
 
std::string IdentityFileName (const std::string &dbname)
 
bool ParseFileName (const std::string &fname, uint64_t *number, FileType *type, WalFileType *log_type)
 
Status SetCurrentFile (Env *env, const std::string &dbname, uint64_t descriptor_number)
 
Status SetIdentityFile (Env *env, const std::string &dbname)
 
 TEST (FileNameTest, Parse)
 
 TEST (FileNameTest, Construction)
 
static const char * EncodeKey (std::string *scratch, const Slice &target)
 
std::shared_ptr< DB > OpenDb ()
 
 TEST (PerfContextTest, SeekIntoDeletion)
 
 TEST (PerfContextTest, StopWatchNanoOverhead)
 
 TEST (PerfContextTest, StopWatchOverhead)
 
void ProfileKeyComparison ()
 
 TEST (PerfContextTest, KeyComparisonCount)
 
 TEST (PerfContextTest, SeekKeyComparison)
 
Slice TestKeyToSlice (const TestKey &test_key)
 
const TestKey * SliceToTestKey (const Slice &slice)
 
 TEST (PrefixTest, DynamicPrefixIterator)
 
 TEST (PrefixTest, PrefixHash)
 
Status RepairDB (const std::string &dbname, const Options &options)
 
 TEST (SimpleTableDBTest, Empty)
 
 TEST (SimpleTableDBTest, ReadWrite)
 
 TEST (SimpleTableDBTest, Flush)
 
 TEST (SimpleTableDBTest, Flush2)
 
static std::string Key (int i)
 
static std::string RandomString (Random *rnd, int len)
 
 TEST (SimpleTableDBTest, CompactionTrigger)
 
 TEST (SkipTest, Empty)
 
 TEST (SkipTest, InsertAndLookup)
 
 TEST (SkipTest, ConcurrentWithoutThreads)
 
static void ConcurrentReader (void *arg)
 
static void RunConcurrent (int run)
 
 TEST (SkipTest, Concurrent1)
 
 TEST (SkipTest, Concurrent2)
 
 TEST (SkipTest, Concurrent3)
 
 TEST (SkipTest, Concurrent4)
 
 TEST (SkipTest, Concurrent5)
 
static void DeleteEntry (const Slice &key, void *value)
 
static void UnrefEntry (void *arg1, void *arg2)
 
uint64_t GetDeletedKeys (const TableProperties::UserCollectedProperties &props)
 
void MakeBuilder (const Options &options, std::unique_ptr< FakeWritableFile > *writable, std::unique_ptr< TableBuilder > *builder)
 
void OpenTable (const Options &options, const std::string &contents, std::unique_ptr< TableReader > *table_reader)
 
 TEST (TablePropertiesTest, CustomizedTablePropertiesCollector)
 
 TEST (TablePropertiesTest, InternalKeyPropertiesCollector)
 
static bool GetInternalKey (Slice *input, InternalKey *dst)
 
static void TestEncodeDecode (const VersionEdit &edit)
 
 TEST (VersionEditTest, EncodeDecode)
 
static uint64_t TotalFileSize (const std::vector< FileMetaData * > &files)
 
int FindFile (const InternalKeyComparator &icmp, const std::vector< FileMetaData * > &files, const Slice &key)
 
static bool AfterFile (const Comparator *ucmp, const Slice *user_key, const FileMetaData *f)
 
static bool BeforeFile (const Comparator *ucmp, const Slice *user_key, const FileMetaData *f)
 
bool SomeFileOverlapsRange (const InternalKeyComparator &icmp, bool disjoint_sorted_files, const std::vector< FileMetaData * > &files, const Slice *smallest_user_key, const Slice *largest_user_key)
 
static Iterator * GetFileIterator (void *arg, const ReadOptions &options, const EnvOptions &soptions, const Slice &file_value, bool for_compaction)
 
static void MarkKeyMayExist (void *arg)
 
static bool SaveValue (void *arg, const Slice &ikey, const Slice &v, bool didIO)
 
static bool NewestFirst (FileMetaData *a, FileMetaData *b)
 
static bool NewestFirstBySeqNo (FileMetaData *a, FileMetaData *b)
 
static bool compareSizeDescending (const VersionSet::Fsize &first, const VersionSet::Fsize &second)
 
static bool compareSeqnoDescending (const VersionSet::Fsize &first, const VersionSet::Fsize &second)
 
static void InputSummary (std::vector< FileMetaData * > &files, char *output, int len)
 
 TEST (FindFileTest, Empty)
 
 TEST (FindFileTest, Single)
 
 TEST (FindFileTest, Multiple)
 
 TEST (FindFileTest, MultipleNullBoundaries)
 
 TEST (FindFileTest, OverlapSequenceChecks)
 
 TEST (FindFileTest, OverlappingFiles)
 
static std::string PrintContents (WriteBatch *b)
 
 TEST (WriteBatchTest, Empty)
 
 TEST (WriteBatchTest, Multiple)
 
 TEST (WriteBatchTest, Corruption)
 
 TEST (WriteBatchTest, Append)
 
 TEST (WriteBatchTest, Blob)
 
 TEST (WriteBatchTest, Continue)
 
 TEST (WriteBatchTest, PutGatherSlices)
 
Env * NewMemEnv (Env *base_env)
 
 TEST (MemEnvTest, Basics)
 
 TEST (MemEnvTest, ReadWrite)
 
 TEST (MemEnvTest, Locks)
 
 TEST (MemEnvTest, Misc)
 
 TEST (MemEnvTest, LargeWrite)
 
 TEST (MemEnvTest, DBTest)
 
shared_ptr< Cache > NewLRUCache (size_t capacity)
 
shared_ptr< Cache > NewLRUCache (size_t capacity, int numShardBits)
 
shared_ptr< Cache > NewLRUCache (size_t capacity, int numShardBits, int removeScanCountLimit)
 
const Comparator * BytewiseComparator ()
 
void LogFlush (const shared_ptr< Logger > &info_log)
 
void Log (const shared_ptr< Logger > &info_log, const char *format,...)
 
void LogFlush (Logger *info_log)
 
void Log (Logger *info_log, const char *format,...)
 
Status WriteStringToFile (Env *env, const Slice &data, const std::string &fname)
 
Status ReadFileToString (Env *env, const std::string &fname, std::string *data)
 
const FilterPolicy * NewBloomFilterPolicy (int bits_per_key)
 
Iterator * NewEmptyIterator ()
 
Iterator * NewErrorIterator (const Status &status)
 
MemTableRepFactory * NewHashSkipListRepFactory (const SliceTransform *transform, size_t bucket_count=1000000)
 
void SetPerfLevel (PerfLevel level)
 
bool operator== (const Slice &x, const Slice &y)
 
bool operator!= (const Slice &x, const Slice &y)
 
const SliceTransform * NewFixedPrefixTransform (size_t prefix_len)
 
const SliceTransform * NewNoopTransform ()
 
void RecordTick (std::shared_ptr< Statistics > statistics, Tickers ticker, uint64_t count=1)
 
void SetTickerCount (std::shared_ptr< Statistics > statistics, Tickers ticker, uint64_t count)
 
void InstallStackTraceHandler ()
 
static const char * DecodeEntry (const char *p, const char *limit, uint32_t *shared, uint32_t *non_shared, uint32_t *value_length)
 
static void DeleteCachedBlock (const Slice &key, void *value)
 
bool SaveDidIO (void *arg, const Slice &key, const Slice &value, bool didIO)
 
static std::string RandomString (Random *rnd, int len)
 
 TEST (BlockTest, SimpleTest)
 
 TEST (FilterBlockTest, EmptyBuilder)
 
 TEST (FilterBlockTest, SingleChunk)
 
 TEST (FilterBlockTest, MultiChunk)
 
Status ReadBlockContents (RandomAccessFile *file, const ReadOptions &options, const BlockHandle &handle, BlockContents *result, Env *env, bool do_uncompress)
 
Status UncompressBlockContents (const char *data, size_t n, BlockContents *result)
 
MaxIterHeap NewMaxIterHeap (const Comparator *comparator)
 
MinIterHeap NewMinIterHeap (const Comparator *comparator)
 
Iterator * NewMergingIterator (const Comparator *cmp, Iterator **list, int n)
 
static std::string MakeKey (int i, int j, bool through_db)
 
static bool DummySaveValue (void *arg, const Slice &ikey, const Slice &v, bool didIO)
 
void TableReaderBenchmark (Options &opts, EnvOptions &env_options, ReadOptions &read_options, int num_keys1, int num_keys2, int num_iter, int prefix_len, bool if_query_empty_keys, bool for_iterator, bool through_db)
 
static void Increment (const Comparator *cmp, std::string *key)
 
static bool SnappyCompressionSupported ()
 
static bool ZlibCompressionSupported ()
 
static std::vector< TestArgs > GenerateArgList ()
 
 TEST (Harness, SimpleEmptyKey)
 
 TEST (Harness, SimpleSingle)
 
 TEST (Harness, SimpleMulti)
 
 TEST (Harness, SimpleSpecialKey)
 
static bool Between (uint64_t val, uint64_t low, uint64_t high)
 
 TEST (TableTest, BasicTableProperties)
 
 TEST (TableTest, FilterPolicyNameProperties)
 
static std::string RandomString (Random *rnd, int len)
 
 TEST (TableTest, IndexSizeStat)
 
 TEST (TableTest, NumBlockStat)
 
 TEST (TableTest, BlockCacheTest)
 
 TEST (TableTest, ApproximateOffsetOfPlain)
 
static void Do_Compression_Test (CompressionType comp)
 
 TEST (TableTest, ApproximateOffsetOfCompressed)
 
 TEST (TableTest, BlockCacheLeak)
 
 TEST (Harness, Randomized)
 
 TEST (Harness, RandomizedLongDB)
 
 TEST (MemTableTest, Simple)
 
Iterator * NewTwoLevelIterator (Iterator *index_iter, BlockFunction block_function, void *arg, const ReadOptions &options, const EnvOptions &soptions, bool for_compaction)
 
Iterator * NewTwoLevelIterator (Iterator *index_iter, Iterator *(*block_function)(void *arg, const ReadOptions &options, const EnvOptions &soptions, const Slice &index_value, bool for_compaction), void *arg, const ReadOptions &options, const EnvOptions &soptions, bool for_compaction=false)
 
static std::string Key (long val)
 
 TEST (ReduceLevelTest, Last_Level)
 
 TEST (ReduceLevelTest, Top_Level)
 
 TEST (ReduceLevelTest, All_Levels)
 
 TEST (ArenaImplTest, Empty)
 
 TEST (ArenaImplTest, MemoryAllocatedBytes)
 
 TEST (ArenaImplTest, Simple)
 
Status CreateLoggerFromOptions (const std::string &dbname, const std::string &db_log_dir, Env *env, const Options &options, std::shared_ptr< Logger > *logger)
 
void LogMessage (Logger *logger, const char *message)
 
void GetFileCreateTime (const std::string &fname, uint64_t *file_ctime)
 
 TEST (AutoRollLoggerTest, RollLogFileBySize)
 
 TEST (AutoRollLoggerTest, RollLogFileByTime)
 
 TEST (AutoRollLoggerTest, OpenLogFilesMultipleTimesWithOptionLog_max_size)
 
 TEST (AutoRollLoggerTest, CompositeRollByTimeAndSizeLogger)
 
 TEST (AutoRollLoggerTest, CreateLoggerFromOptions)
 
int OldLogFileCount (const string &dir)
 
 TEST (BlobStoreTest, RangeParseTest)
 
 TEST (BlobStoreTest, SanityTest)
 
 TEST (BlobStoreTest, FragmentedChunksTest)
 
 TEST (BlobStoreTest, CreateAndStoreTest)
 
 TEST (BlobStoreTest, MaxSizeTest)
 
static Slice Key (int i, char *buffer)
 
 TEST (BloomTest, EmptyFilter)
 
 TEST (BloomTest, Small)
 
static int NextLength (int length)
 
 TEST (BloomTest, VaryingLengths)
 
static std::string EncodeKey (int k)
 
static int DecodeKey (const Slice &k)
 
static void * EncodeValue (uintptr_t v)
 
static int DecodeValue (void *v)
 
 TEST (CacheTest, HitAndMiss)
 
 TEST (CacheTest, Erase)
 
 TEST (CacheTest, EntriesArePinned)
 
 TEST (CacheTest, EvictionPolicy)
 
 TEST (CacheTest, EvictionPolicyRef)
 
 TEST (CacheTest, EvictionPolicyRef2)
 
 TEST (CacheTest, EvictionPolicyRefLargeScanLimit)
 
 TEST (CacheTest, HeavyEntries)
 
 TEST (CacheTest, NewId)
 
void deleter (const Slice &key, void *value)
 
 TEST (CacheTest, BadEviction)
 
void EncodeFixed32 (char *buf, uint32_t value)
 
void EncodeFixed64 (char *buf, uint64_t value)
 
void PutFixed32 (std::string *dst, uint32_t value)
 
void PutFixed64 (std::string *dst, uint64_t value)
 
char * EncodeVarint32 (char *dst, uint32_t v)
 
void PutVarint32 (std::string *dst, uint32_t v)
 
char * EncodeVarint64 (char *dst, uint64_t v)
 
void PutVarint64 (std::string *dst, uint64_t v)
 
void PutLengthPrefixedSlice (std::string *dst, const Slice &value)
 
void PutLengthPrefixedSliceParts (std::string *dst, const SliceParts &slice_parts)
 
int VarintLength (uint64_t v)
 
const char * GetVarint32PtrFallback (const char *p, const char *limit, uint32_t *value)
 
bool GetVarint32 (Slice *input, uint32_t *value)
 
const char * GetVarint64Ptr (const char *p, const char *limit, uint64_t *value)
 
bool GetVarint64 (Slice *input, uint64_t *value)
 
const char * GetLengthPrefixedSlice (const char *p, const char *limit, Slice *result)
 
bool GetLengthPrefixedSlice (Slice *input, Slice *result)
 
Slice GetLengthPrefixedSlice (const char *data)
 
void BitStreamPutInt (char *dst, size_t dstlen, size_t offset, uint32_t bits, uint64_t value)
 
uint64_t BitStreamGetInt (const char *src, size_t srclen, size_t offset, uint32_t bits)
 
void BitStreamPutInt (std::string *dst, size_t offset, uint32_t bits, uint64_t value)
 
uint64_t BitStreamGetInt (const std::string *src, size_t offset, uint32_t bits)
 
uint64_t BitStreamGetInt (const Slice *src, size_t offset, uint32_t bits)
 
const char * GetVarint32Ptr (const char *p, const char *limit, uint32_t *v)
 
uint32_t DecodeFixed32 (const char *ptr)
 
uint64_t DecodeFixed64 (const char *ptr)
 
 TEST (Coding, Fixed32)
 
 TEST (Coding, Fixed64)
 
 TEST (Coding, EncodingOutput)
 
 TEST (Coding, Varint32)
 
 TEST (Coding, Varint64)
 
 TEST (Coding, Varint32Overflow)
 
 TEST (Coding, Varint32Truncation)
 
 TEST (Coding, Varint64Overflow)
 
 TEST (Coding, Varint64Truncation)
 
 TEST (Coding, Strings)
 
 TEST (Coding, BitStream)
 
 TEST (Coding, BitStreamConvenienceFuncs)
 
static void InitModule ()
 
static Status DoWriteStringToFile (Env *env, const Slice &data, const std::string &fname, bool should_sync)
 
static void SetBool (void *ptr)
 
 TEST (EnvPosixTest, RunImmediately)
 
 TEST (EnvPosixTest, RunMany)
 
static void ThreadBody (void *arg)
 
 TEST (EnvPosixTest, StartThread)
 
 TEST (EnvPosixTest, TwoPools)
 
bool IsSingleVarint (const std::string &s)
 
 TEST (EnvPosixTest, PosixRandomRWFileTest)
 
 TEST (LockTest, LockBySameThread)
 
uint32_t Hash (const char *data, size_t n, uint32_t seed)
 
 maxBucketValue_ (bucketValues_.back())
 
 minBucketValue_ (bucketValues_.front())
 
 TEST (HistogramTest, BasicOperation)
 
 TEST (HistogramTest, EmptyHistogram)
 
 TEST (HistogramTest, ClearHistogram)
 
 null_from_ (true)
 
 null_to_ (true)
 
 create_if_missing_ (false)
 
 disable_wal_ (false)
 
 bulk_load_ (false)
 
 compact_ (false)
 
 verbose_ (false)
 
 path_ ("")
 
string ReadableTime (int unixtime)
 
void IncBucketCounts (vector< uint64_t > &bucket_counts, int ttl_start, int time_range, int bucket_size, int timekv, int num_buckets)
 
void PrintBucketCounts (const vector< uint64_t > &bucket_counts, int ttl_start, int ttl_end, int bucket_size, int num_buckets)
 
 has_from_ (false)
 
 has_to_ (false)
 
 max_keys_ (-1)
 
 delim_ (".")
 
 count_only_ (false)
 
 count_delim_ (false)
 
 print_stats_ (false)
 
 is_input_key_hex_ (false)
 
 old_levels_ (1<< 16)
 
 new_levels_ (-1)
 
 print_old_levels_ (false)
 
 old_compaction_style_ (-1)
 
 new_compaction_style_ (-1)
 
 print_header_ (false)
 
 print_values_ (false)
 
 if (params.size()!=1)
 
 if (is_key_hex_)
 
 start_key_specified_ (false)
 
 end_key_specified_ (false)
 
 max_keys_scanned_ (-1)
 
void AppendNumberTo (std::string *str, uint64_t num)
 
void AppendEscapedStringTo (std::string *str, const Slice &value)
 
std::string NumberToString (uint64_t num)
 
std::string EscapeString (const Slice &value)
 
bool ConsumeChar (Slice *in, char c)
 
bool ConsumeDecimalNumber (Slice *in, uint64_t *val)
 
void StartPerfTimer (StopWatchNano *timer)
 
void BumpPerfCount (uint64_t *count, uint64_t delta=1)
 
void BumpPerfTime (uint64_t *time, StopWatchNano *timer, bool reset=true)
 
vector< string > stringSplit (string arg, char delim)
 
std::shared_ptr< DB > OpenNormalDb (char delim_char)
 
std::shared_ptr< DB > OpenTtlDb (char delim_char)
 
 TEST (StringAppendOperatorTest, IteratorTest)
 
 TEST (StringAppendOperatorTest, SimpleTest)
 
 TEST (StringAppendOperatorTest, SimpleDelimiterTest)
 
 TEST (StringAppendOperatorTest, OneValueNoDelimiterTest)
 
 TEST (StringAppendOperatorTest, VariousKeys)
 
 TEST (StringAppendOperatorTest, RandomMixGetAppend)
 
 TEST (StringAppendOperatorTest, BIGRandomMixGetAppend)
 
 TEST (StringAppendOperatorTest, PersistentVariousKeys)
 
 TEST (StringAppendOperatorTest, PersistentFlushAndCompaction)
 
 TEST (StringAppendOperatorTest, SimpleTestNullDelimiter)
 
void AssertListEq (const std::vector< std::string > &result, const std::vector< std::string > &expected_result)
 
 TEST (RedisListsTest, SimpleTest)
 
 TEST (RedisListsTest, SimpleTest2)
 
 TEST (RedisListsTest, IndexTest)
 
 TEST (RedisListsTest, RangeTest)
 
 TEST (RedisListsTest, InsertTest)
 
 TEST (RedisListsTest, SetTest)
 
 TEST (RedisListsTest, InsertPushSetTest)
 
 TEST (RedisListsTest, TrimPopTest)
 
 TEST (RedisListsTest, RemoveTest)
 
 TEST (RedisListsTest, PersistenceMultiKeyTest)
 
void MakeUpper (std::string *const s)
 
int manual_redis_test (bool destructive)
 
 TEST (TtlTest, NoEffect)
 
 TEST (TtlTest, PresentDuringTTL)
 
 TEST (TtlTest, AbsentAfterTTL)
 
 TEST (TtlTest, ResetTimestamp)
 
 TEST (TtlTest, IterPresentDuringTTL)
 
 TEST (TtlTest, IterAbsentAfterTTL)
 
 TEST (TtlTest, MultiOpenSamePresent)
 
 TEST (TtlTest, MultiOpenSameAbsent)
 
 TEST (TtlTest, MultiOpenDifferent)
 
 TEST (TtlTest, ReadOnlyPresentForever)
 
 TEST (TtlTest, WriteBatchTest)
 
 TEST (TtlTest, CompactionFilter)
 
 TEST (TtlTest, KeyMayExist)
 

Variables

static const int kValueSize = 1000
 
static int cfilter_count
 
static std::string NEW_VALUE = "NewValue"
 
static const ValueType kValueTypeForSeek = kTypeMerge
 
static const SequenceNumber kMaxSequenceNumber
 
static const std::string ARCHIVAL_DIR = "archive"
 
static const size_t kHeader = 12
 
static const Status notsup
 
static const int kMajorVersion = 2
 
static const int kMinorVersion = 0
 
__thread PerfContext perf_context
 
const std::vector< std::pair
< Tickers, std::string > > 
TickersNameMap
 
const std::vector< std::pair
< Histograms, std::string > > 
HistogramsNameMap
 
const size_t kMaxCacheKeyPrefixSize = kMaxVarint64Length*3+1
 
static const size_t kFilterBaseLg = 11
 
static const size_t kFilterBase = 1 << kFilterBaseLg
 
static const uint64_t kTableMagicNumber = 0xdb4775248b80fb57ull
 
static const size_t kBlockTrailerSize = 5
 
static ReverseKeyComparator reverse_key_comparator
 
static const int kVerbose = 1
 
const unsigned int kMaxVarint32Length = 5
 
const unsigned int kMaxVarint64Length = 10
 
static port::OnceType once = LEVELDB_ONCE_INIT
 
static const Comparator * bytewise
 
static const int kDelayMicros = 100000
 
 else
 
static const char *const access_hints []
 
PerfLevel perf_level = kEnableCount
 
const int kDebugLogChunkSize = 128 * 1024
 
const std::string kDbName = "/tmp/mergetestdb"
 

Detailed Description

Back-end implementation details specific to the Merge Operator.

A MergeOperator for rocksdb that implements string append.

Author
Deon Nicholas (dnich.nosp@m.olas.nosp@m.@fb.c.nosp@m.om) Copyright 2013 Facebook
Deon Nicholas (dnich.nosp@m.olas.nosp@m.@fb.c.nosp@m.om) Copyright 2013 Facebook

A TEST MergeOperator for rocksdb that implements string append. It is built using the MergeOperator interface rather than the simpler AssociativeMergeOperator interface. This is useful for testing/benchmarking. While the two operators are semantically the same, all production code should use the StringAppendOperator defined in stringappend.{h,cc}. The operator defined in the present file is primarily for testing.

Author
Deon Nicholas (dnich.nosp@m.olas.nosp@m.@fb.c.nosp@m.om) Copyright 2013 Facebook

A simple structure for exceptions in RedisLists.

Author
Deon Nicholas (dnich.nosp@m.olas.nosp@m.@fb.c.nosp@m.om) Copyright 2013 Facebook

RedisListIterator: An abstraction over the "list" concept (e.g.: for redis lists). Provides functionality to read, traverse, edit, and write these lists.

Upon construction, the RedisListIterator is given a block of list data. Internally, it stores a pointer to the data and a pointer to current item. It also stores a "result" list that will be mutated over time.

Traversal and mutation are done by "forward iteration". The Push() and Skip() methods will advance the iterator to the next item. However, Push() will also "write the current item to the result". Skip() will simply move to next item, causing current item to be dropped.

Upon completion, the result (accessible by WriteResult()) will be saved. All "skipped" items will be gone; all "pushed" items will remain.

Exceptions
Any of the operations may throw a RedisListException if an invalid operation is performed or if the data is found to be corrupt.

By default, if WriteResult() is called part-way through iteration, it will automatically advance the iterator to the end, and Keep() all items that haven't been traversed yet. This may be subject to review.

Can access the "current" item via GetCurrent(), and other list-specific information such as Length().

The internal representation is due to change at any time. Presently, the list is represented as follows:

  • 32-bit integer header: the number of items in the list
  • For each item:
    • 32-bit int (n): the number of bytes representing this item
    • n bytes of data: the actual data.
Author
Deon Nicholas (dnich.nosp@m.olas.nosp@m.@fb.c.nosp@m.om) Copyright 2013 Facebook

A (persistent) Redis API built using the rocksdb backend. Implements Redis Lists as described on: http://redis.io/commands#list

Exceptions
All functions may throw a RedisListException on error/corruption.

Internally, the set of lists is stored in a rocksdb database, mapping keys to values. Each "value" is the list itself, storing some kind of internal representation of the data. All the representation details are handled by the RedisListIterator class. The present file should be oblivious to the representation details, handling only the client (Redis) API, and the calls to rocksdb.

Presently, all operations take at least O(NV) time, where N is the number of elements in the list and V is the average number of bytes per value in the list. With a merge operator, this could be improved to an optimal O(V) amortized time, since the entire list would not have to be read and re-written.

Author
Deon Nicholas (dnich.nosp@m.olas.nosp@m.@fb.c.nosp@m.om) Copyright 2013 Facebook

A (persistent) Redis API built using the rocksdb backend. Implements Redis Lists as described on: http://redis.io/commands#list

Exceptions
All functions may throw a RedisListException
Author
Deon Nicholas (dnich.nosp@m.olas.nosp@m.@fb.c.nosp@m.om) Copyright 2013 Facebook

Typedef Documentation

Definition at line 20 of file skiplist_test.cc.

typedef std::map<std::string, std::string, anon::STLLessThan> rocksdb::KVMap

Definition at line 159 of file table_test.cc.

typedef std::priority_queue< IteratorWrapper*, std::vector<IteratorWrapper*>, MaxIteratorComparator> rocksdb::MaxIterHeap

Definition at line 47 of file iter_heap.h.

typedef std::priority_queue< IteratorWrapper*, std::vector<IteratorWrapper*>, MinIteratorComparator> rocksdb::MinIterHeap

Definition at line 52 of file iter_heap.h.

Definition at line 16 of file types.h.

typedef std::vector<std::unique_ptr<LogFile> > rocksdb::VectorLogPtr

Definition at line 17 of file transaction_log.h.

Enumeration Type Documentation

Enumerator
kCompactionStopStyleSimilarSize 
kCompactionStopStyleTotalSize 

Definition at line 24 of file universal_compaction.h.

24  {
25  kCompactionStopStyleSimilarSize, // pick files of similar size
26  kCompactionStopStyleTotalSize // total size of picked files > next file
27 };
Enumerator
kCompactionStyleLevel 
kCompactionStyleUniversal 

Definition at line 55 of file options.h.

55  : char {
56  kCompactionStyleLevel = 0x0, // level based compaction style
57  kCompactionStyleUniversal = 0x1 // Universal compaction style
58 };
Enumerator
kNoCompression 
kSnappyCompression 
kZlibCompression 
kBZip2Compression 

Definition at line 46 of file options.h.

46  : char {
47  // NOTE: do not change the values of existing entries, as these are
48  // part of the persistent format on disk.
49  kNoCompression = 0x0,
50  kSnappyCompression = 0x1,
51  kZlibCompression = 0x2,
52  kBZip2Compression = 0x3
53 };
Enumerator
kLogFile 
kDBLockFile 
kTableFile 
kDescriptorFile 
kCurrentFile 
kTempFile 
kInfoLogFile 
kMetaDatabase 
kIdentityFile 

Definition at line 24 of file filename.h.

24  {
25  kLogFile,
27  kTableFile,
30  kTempFile,
31  kInfoLogFile, // Either the current one, or an old one
34 };

Keep adding histograms here. Any histogram should have a value less than HISTOGRAM_ENUM_MAX. Add a new histogram by assigning it the current value of HISTOGRAM_ENUM_MAX. Add a string representation in HistogramsNameMap below. And increment HISTOGRAM_ENUM_MAX.

Enumerator
DB_GET 
DB_WRITE 
COMPACTION_TIME 
TABLE_SYNC_MICROS 
COMPACTION_OUTFILE_SYNC_MICROS 
WAL_FILE_SYNC_MICROS 
MANIFEST_FILE_SYNC_MICROS 
TABLE_OPEN_IO_MICROS 
DB_MULTIGET 
READ_BLOCK_COMPACTION_MICROS 
READ_BLOCK_GET_MICROS 
WRITE_RAW_BLOCK_MICROS 
STALL_L0_SLOWDOWN_COUNT 
STALL_MEMTABLE_COMPACTION_COUNT 
STALL_L0_NUM_FILES_COUNT 
HARD_RATE_LIMIT_DELAY_COUNT 
SOFT_RATE_LIMIT_DELAY_COUNT 
NUM_FILES_IN_SINGLE_COMPACTION 
HISTOGRAM_ENUM_MAX 

Definition at line 165 of file statistics.h.

Enumerator
kDisable 
kEnableCount 
kEnableTime 

Definition at line 13 of file perf_context.h.

13  {
14  kDisable = 0, // disable perf stats
15  kEnableCount = 1, // enable only count stats
16  kEnableTime = 2 // enable time stats too
17 };
Enumerator
kReadAllTier 
kBlockCacheTier 

Definition at line 636 of file options.h.

636  {
637  kReadAllTier = 0x0, // data in memtable, block cache, OS cache or storage
638  kBlockCacheTier = 0x1 // data in memtable or block cache
639 };
Enumerator
kComparator 
kLogNumber 
kNextFileNumber 
kLastSequence 
kCompactPointer 
kDeletedFile 
kNewFile 
kPrevLogNumber 
kNewFile2 

Definition at line 19 of file version_edit.cc.

19  {
20  kComparator = 1,
21  kLogNumber = 2,
22  kNextFileNumber = 3,
23  kLastSequence = 4,
24  kCompactPointer = 5,
25  kDeletedFile = 6,
26  kNewFile = 7,
27  // 8 was used for large value refs
28  kPrevLogNumber = 9,
29 
30  // these are new formats divergent from open source leveldb
31  kNewFile2 = 100 // store smallest & largest seqno
32 };
Enumerator
TABLE_TEST 
BLOCK_TEST 
MEMTABLE_TEST 
DB_TEST 

Definition at line 478 of file table_test.cc.

478  {
479  TABLE_TEST,
480  BLOCK_TEST,
482  DB_TEST
483 };

Keep adding tickers here. Any ticker should have a value less than TICKER_ENUM_MAX. Add a new ticker by assigning it the current value of TICKER_ENUM_MAX. Add a string representation in TickersNameMap below. And increment TICKER_ENUM_MAX.

Enumerator
BLOCK_CACHE_MISS 
BLOCK_CACHE_HIT 
BLOCK_CACHE_ADD 
BLOCK_CACHE_INDEX_MISS 
BLOCK_CACHE_INDEX_HIT 
BLOCK_CACHE_FILTER_MISS 
BLOCK_CACHE_FILTER_HIT 
BLOCK_CACHE_DATA_MISS 
BLOCK_CACHE_DATA_HIT 
BLOOM_FILTER_USEFUL 
COMPACTION_KEY_DROP_NEWER_ENTRY 

COMPACTION_KEY_DROP_* count the reasons for key drop during compaction There are 3 reasons currently.

COMPACTION_KEY_DROP_OBSOLETE 
COMPACTION_KEY_DROP_USER 
NUMBER_KEYS_WRITTEN 
NUMBER_KEYS_READ 
NUMBER_KEYS_UPDATED 
BYTES_WRITTEN 
BYTES_READ 
NO_FILE_CLOSES 
NO_FILE_OPENS 
NO_FILE_ERRORS 
STALL_L0_SLOWDOWN_MICROS 
STALL_MEMTABLE_COMPACTION_MICROS 
STALL_L0_NUM_FILES_MICROS 
RATE_LIMIT_DELAY_MILLIS 
NO_ITERATORS 
NUMBER_MULTIGET_CALLS 
NUMBER_MULTIGET_KEYS_READ 
NUMBER_MULTIGET_BYTES_READ 
NUMBER_FILTERED_DELETES 
NUMBER_MERGE_FAILURES 
SEQUENCE_NUMBER 
BLOOM_FILTER_PREFIX_CHECKED 
BLOOM_FILTER_PREFIX_USEFUL 
NUMBER_OF_RESEEKS_IN_ITERATION 
GET_UPDATES_SINCE_CALLS 
BLOCK_CACHE_COMPRESSED_MISS 
BLOCK_CACHE_COMPRESSED_HIT 
TICKER_ENUM_MAX 

Definition at line 26 of file statistics.h.

26  {
27  // total block cache misses
28  // REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
29  // BLOCK_CACHE_FILTER_MISS +
30  // BLOCK_CACHE_DATA_MISS;
32  // total block cache hit
33  // REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
34  // BLOCK_CACHE_FILTER_HIT +
35  // BLOCK_CACHE_DATA_HIT;
37  // # of blocks added to block cache.
39  // # of times cache miss when accessing index block from block cache.
41  // # of times cache hit when accessing index block from block cache.
43  // # of times cache miss when accessing filter block from block cache.
45  // # of times cache hit when accessing filter block from block cache.
47  // # of times cache miss when accessing data block from block cache.
49  // # of times cache hit when accessing data block from block cache.
51  // # of times bloom filter has avoided file reads.
53 
58  COMPACTION_KEY_DROP_NEWER_ENTRY, // key was written with a newer value.
59  COMPACTION_KEY_DROP_OBSOLETE, // The key is obsolete.
60  COMPACTION_KEY_DROP_USER, // user compaction function has dropped the key.
61 
62  // Number of keys written to the database via the Put and Write call's
64  // Number of Keys read,
66  // Number keys updated, if inplace update is enabled
68  // Bytes written / read
70  BYTES_READ,
74  // Time system had to wait to do L0-L1 compactions
76  // Time system had to wait to move memtable to L1.
78  // write throttle because of too many files in L0
81 
82  NO_ITERATORS, // number of iterators currently open
83 
84  // Number of MultiGet calls, keys read, and bytes read
88 
89  // Number of deletes records that were not required to be
90  // written to storage because key does not exist
94 
95  // number of times bloom was checked before creating iterator on a
96  // file, and the number of times the check was useful in avoiding
97  // iterator creation (and thus likely IOPs).
100 
101  // Number of times we had to reseek inside an iteration to skip
102  // over large number of keys with same userkey.
104 
105  // Record the number of calls to GetUpdatesSince. Useful to keep track of
106  // transaction log iterator refreshes
108 
109  BLOCK_CACHE_COMPRESSED_MISS, // miss in the compressed block cache
110  BLOCK_CACHE_COMPRESSED_HIT, // hit in the compressed block cache
111 
113 };
Enumerator
kTypeDeletion 
kTypeValue 
kTypeMerge 
kTypeLogData 

Definition at line 28 of file dbformat.h.

28  {
29  kTypeDeletion = 0x0,
30  kTypeValue = 0x1,
31  kTypeMerge = 0x2,
32  kTypeLogData = 0x3
33 };
Enumerator
kArchivedLogFile 
kAliveLogFile 

Definition at line 20 of file transaction_log.h.

20  {
21  /* Indicates that WAL file is in archive directory. WAL files are moved from
22  * the main db directory to archive directory once they are not live and stay
23  * there until cleaned up. Files are cleaned depending on archive size
24  * (Options::WAL_size_limit_MB) and time since last cleaning
25  * (Options::WAL_ttl_seconds).
26  */
27  kArchivedLogFile = 0,
28 
29  /* Indicates that WAL file is live and resides in the main db directory */
30  kAliveLogFile = 1
31 } ;

Function Documentation

static bool rocksdb::AfterFile ( const Comparator *  ucmp,
const Slice *  user_key,
const FileMetaData *  f 
)
static

Definition at line 81 of file version_set.cc.

References rocksdb::Comparator::Compare(), rocksdb::FileMetaData::largest, and rocksdb::InternalKey::user_key().

Referenced by SomeFileOverlapsRange().

82  {
83  // nullptr user_key occurs before all keys and is therefore never after *f
84  return (user_key != nullptr &&
85  ucmp->Compare(*user_key, f->largest.user_key()) > 0);
86 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::AppendEscapedStringTo ( std::string *  str,
const Slice &  value 
)

Definition at line 27 of file logging.cc.

References rocksdb::Slice::size().

Referenced by EscapeString().

27  {
28  for (size_t i = 0; i < value.size(); i++) {
29  char c = value[i];
30  if (c >= ' ' && c <= '~') {
31  str->push_back(c);
32  } else {
33  char buf[10];
34  snprintf(buf, sizeof(buf), "\\x%02x",
35  static_cast<unsigned int>(c) & 0xff);
36  str->append(buf);
37  }
38  }
39 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::AppendInternalKey ( std::string *  result,
const ParsedInternalKey &  key 
)

Definition at line 24 of file dbformat.cc.

References rocksdb::Slice::data(), PackSequenceAndType(), PutFixed64(), rocksdb::ParsedInternalKey::sequence, rocksdb::Slice::size(), rocksdb::ParsedInternalKey::type, and rocksdb::ParsedInternalKey::user_key.

Referenced by IKey(), rocksdb::InternalKey::InternalKey(), rocksdb::KeyConvertingIterator::Seek(), and rocksdb::InternalKey::SetFrom().

24  {
25  result->append(key.user_key.data(), key.user_key.size());
26  PutFixed64(result, PackSequenceAndType(key.sequence, key.type));
27 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::AppendNumberTo ( std::string *  str,
uint64_t  num 
)

Definition at line 21 of file logging.cc.

Referenced by rocksdb::VersionEdit::DebugString(), rocksdb::Version::DebugString(), rocksdb::TtlTest::MakeKVMap(), and NumberToString().

21  {
22  char buf[30];
23  snprintf(buf, sizeof(buf), "%llu", (unsigned long long) num);
24  str->append(buf);
25 }

Here is the caller graph for this function:

static void rocksdb::AppendWithSpace ( std::string *  str,
Slice  msg 
)
static

Definition at line 528 of file db_bench.cc.

References rocksdb::Slice::data(), rocksdb::Slice::empty(), and rocksdb::Slice::size().

Referenced by rocksdb::Stats::AddMessage(), and rocksdb::Stats::Report().

528  {
529  if (msg.empty()) return;
530  if (!str->empty()) {
531  str->push_back(' ');
532  }
533  str->append(msg.data(), msg.size());
534 }

Here is the call graph for this function:

Here is the caller graph for this function:

std::string rocksdb::ArchivalDirectory ( const std::string &  dir)

Definition at line 65 of file filename.cc.

References ARCHIVAL_DIR.

Referenced by rocksdb::DBImpl::CreateArchivalDirectory(), DestroyDB(), rocksdb::DBImpl::GetSortedWalFiles(), rocksdb::DBImpl::PurgeObsoleteWALFiles(), and TEST().

65  {
66  return dir + "/" + ARCHIVAL_DIR;
67 }

Here is the caller graph for this function:

std::string rocksdb::ArchivedLogFileName ( const std::string &  name,
uint64_t  number 
)

Definition at line 68 of file filename.cc.

References ARCHIVAL_DIR, and MakeFileName().

Referenced by rocksdb::DBImpl::CheckWalFileExistsAndEmpty(), rocksdb::TransactionLogIteratorImpl::OpenLogFile(), rocksdb::LogFileImpl::PathName(), rocksdb::DBImpl::PurgeObsoleteFiles(), and rocksdb::DBImpl::ReadFirstRecord().

68  {
69  assert(number > 0);
70  return MakeFileName(name + "/" + ARCHIVAL_DIR, number, "log");
71 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::AssertListEq ( const std::vector< std::string > &  result,
const std::vector< std::string > &  expected_result 
)

Definition at line 44 of file redis_lists_test.cc.

References ASSERT_EQ.

Referenced by TEST().

45  {
46  ASSERT_EQ(result.size(), expected_result.size());
47  for (size_t i = 0; i < result.size(); ++i) {
48  ASSERT_EQ(result[i], expected_result[i]);
49  }
50 }

Here is the caller graph for this function:

static bool rocksdb::BeforeFile ( const Comparator *  ucmp,
const Slice *  user_key,
const FileMetaData *  f 
)
static

Definition at line 88 of file version_set.cc.

References rocksdb::Comparator::Compare(), rocksdb::FileMetaData::smallest, and rocksdb::InternalKey::user_key().

Referenced by SomeFileOverlapsRange().

89  {
90  // nullptr user_key occurs after all keys and is therefore never before *f
91  return (user_key != nullptr &&
92  ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
93 }

Here is the call graph for this function:

Here is the caller graph for this function:

static bool rocksdb::Between ( uint64_t  val,
uint64_t  low,
uint64_t  high 
)
static

Definition at line 789 of file table_test.cc.

789  {
790  bool result = (val >= low) && (val <= high);
791  if (!result) {
792  fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
793  (unsigned long long)(val),
794  (unsigned long long)(low),
795  (unsigned long long)(high));
796  }
797  return result;
798 }
static bool rocksdb::Between ( uint64_t  val,
uint64_t  low,
uint64_t  high 
)
static

Definition at line 2861 of file db_test.cc.

Referenced by Do_Compression_Test(), and TEST().

2861  {
2862  bool result = (val >= low) && (val <= high);
2863  if (!result) {
2864  fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
2865  (unsigned long long)(val),
2866  (unsigned long long)(low),
2867  (unsigned long long)(high));
2868  }
2869  return result;
2870 }

Here is the caller graph for this function:

uint64_t rocksdb::BitStreamGetInt ( const char *  src,
size_t  srclen,
size_t  offset,
uint32_t  bits 
)

Definition at line 253 of file coding.cc.

Referenced by BitStreamGetInt(), BitStreamPutInt(), and TEST().

254  {
255  assert((offset + bits + 7)/8 <= srclen);
256  assert(bits <= 64);
257 
258  const unsigned char* ptr = reinterpret_cast<const unsigned char*>(src);
259 
260  uint64_t result = 0;
261 
262  size_t byteOffset = offset / 8;
263  size_t bitOffset = offset % 8;
264  size_t shift = 0;
265 
266  while (bits > 0) {
267  size_t bitsToGet = std::min<size_t>(bits, 8 - bitOffset);
268  unsigned char mask = ((1 << bitsToGet) - 1);
269 
270  result += (uint64_t)((ptr[byteOffset] >> bitOffset) & mask) << shift;
271 
272  shift += bitsToGet;
273  byteOffset += 1;
274  bitOffset = 0;
275  bits -= bitsToGet;
276  }
277 
278  return result;
279 }

Here is the caller graph for this function:

uint64_t rocksdb::BitStreamGetInt ( const std::string *  src,
size_t  offset,
uint32_t  bits 
)

Definition at line 308 of file coding.cc.

References BitStreamGetInt().

309  {
310  return BitStreamGetInt(src->data(), src->size(), offset, bits);
311 }

Here is the call graph for this function:

uint64_t rocksdb::BitStreamGetInt ( const Slice *  src,
size_t  offset,
uint32_t  bits 
)

Definition at line 313 of file coding.cc.

References BitStreamGetInt(), rocksdb::Slice::data(), and rocksdb::Slice::size().

314  {
315  return BitStreamGetInt(src->data(), src->size(), offset, bits);
316 }

Here is the call graph for this function:

void rocksdb::BitStreamPutInt ( char *  dst,
size_t  dstlen,
size_t  offset,
uint32_t  bits,
uint64_t  value 
)

Definition at line 220 of file coding.cc.

References BitStreamGetInt(), and value.

Referenced by BitStreamPutInt(), and TEST().

221  {
222  assert((offset + bits + 7)/8 <= dstlen);
223  assert(bits <= 64);
224 
225  unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
226 
227  size_t byteOffset = offset / 8;
228  size_t bitOffset = offset % 8;
229 
230  // This prevents unused variable warnings when compiling.
231 #ifndef NDEBUG
232  // Store truncated value.
233  uint64_t origValue = (bits < 64)?(value & (((uint64_t)1 << bits) - 1)):value;
234  uint32_t origBits = bits;
235 #endif
236 
237  while (bits > 0) {
238  size_t bitsToGet = std::min<size_t>(bits, 8 - bitOffset);
239  unsigned char mask = ((1 << bitsToGet) - 1);
240 
241  ptr[byteOffset] = (ptr[byteOffset] & ~(mask << bitOffset)) +
242  ((value & mask) << bitOffset);
243 
244  value >>= bitsToGet;
245  byteOffset += 1;
246  bitOffset = 0;
247  bits -= bitsToGet;
248  }
249 
250  assert(origValue == BitStreamGetInt(dst, dstlen, offset, origBits));
251 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::BitStreamPutInt ( std::string *  dst,
size_t  offset,
uint32_t  bits,
uint64_t  value 
)

Definition at line 281 of file coding.cc.

References BitStreamGetInt(), BitStreamPutInt(), and value.

282  {
283  assert((offset + bits + 7)/8 <= dst->size());
284 
285  const size_t kTmpBufLen = sizeof(value) + 1;
286  char tmpBuf[kTmpBufLen];
287 
288  // Number of bytes of tmpBuf being used
289  const size_t kUsedBytes = (offset%8 + bits)/8;
290 
291  // Copy relevant parts of dst to tmpBuf
292  for (size_t idx = 0; idx <= kUsedBytes; ++idx) {
293  tmpBuf[idx] = (*dst)[offset/8 + idx];
294  }
295 
296  BitStreamPutInt(tmpBuf, kTmpBufLen, offset%8, bits, value);
297 
298  // Copy tmpBuf back to dst
299  for (size_t idx = 0; idx <= kUsedBytes; ++idx) {
300  (*dst)[offset/8 + idx] = tmpBuf[idx];
301  }
302 
303  // Do the check here too as we are working with a buffer.
304  assert(((bits < 64)?(value & (((uint64_t)1 << bits) - 1)):value) ==
305  BitStreamGetInt(dst, offset, bits));
306 }

Here is the call graph for this function:

void rocksdb::BM_LogAndApply ( int  iters,
int  num_base_files 
)

Definition at line 4863 of file db_test.cc.

References ASSERT_OK, ASSERT_TRUE, BytewiseComparator(), rocksdb::Options::create_if_missing, db, dbname, rocksdb::Env::Default(), rocksdb::VersionEdit::DeleteFile(), DestroyDB(), env, kTypeDeletion, kTypeValue, rocksdb::VersionSet::LogAndApply(), MakeKey(), mu, rocksdb::Env::NowMicros(), rocksdb::VersionSet::NumberLevels(), rocksdb::DB::Open(), rocksdb::VersionSet::Recover(), and rocksdb::test::TmpDir().

Referenced by main().

4863  {
4864  std::string dbname = test::TmpDir() + "/rocksdb_test_benchmark";
4865  ASSERT_OK(DestroyDB(dbname, Options()));
4866 
4867  DB* db = nullptr;
4868  Options opts;
4869  opts.create_if_missing = true;
4870  Status s = DB::Open(opts, dbname, &db);
4871  ASSERT_OK(s);
4872  ASSERT_TRUE(db != nullptr);
4873 
4874  delete db;
4875  db = nullptr;
4876 
4877  Env* env = Env::Default();
4878 
4879  port::Mutex mu;
4880  MutexLock l(&mu);
4881 
4882  InternalKeyComparator cmp(BytewiseComparator());
4883  Options options;
4884  EnvOptions sopt;
4885  VersionSet vset(dbname, &options, sopt, nullptr, &cmp);
4886  ASSERT_OK(vset.Recover());
4887  VersionEdit vbase(vset.NumberLevels());
4888  uint64_t fnum = 1;
4889  for (int i = 0; i < num_base_files; i++) {
4890  InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
4891  InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
4892  vbase.AddFile(2, fnum++, 1 /* file size */, start, limit, 1, 1);
4893  }
4894  ASSERT_OK(vset.LogAndApply(&vbase, &mu));
4895 
4896  uint64_t start_micros = env->NowMicros();
4897 
4898  for (int i = 0; i < iters; i++) {
4899  VersionEdit vedit(vset.NumberLevels());
4900  vedit.DeleteFile(2, fnum);
4901  InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
4902  InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
4903  vedit.AddFile(2, fnum++, 1 /* file size */, start, limit, 1, 1);
4904  vset.LogAndApply(&vedit, &mu);
4905  }
4906  uint64_t stop_micros = env->NowMicros();
4907  unsigned int us = stop_micros - start_micros;
4908  char buf[16];
4909  snprintf(buf, sizeof(buf), "%d", num_base_files);
4910  fprintf(stderr,
4911  "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n",
4912  buf, iters, us, ((float)us) / iters);
4913 }

Here is the call graph for this function:

Here is the caller graph for this function:

Status rocksdb::BuildTable ( const std::string &  dbname,
Env *  env,
const Options &  options,
const EnvOptions &  soptions,
TableCache *  table_cache,
Iterator *  iter,
FileMetaData *  meta,
const Comparator *  user_comparator,
const SequenceNumber  newest_snapshot,
const SequenceNumber  earliest_seqno_in_memtable,
const bool  enable_compression 
)

Definition at line 35 of file builder.cc.

References rocksdb::TableBuilder::Abandon(), rocksdb::TableBuilder::Add(), rocksdb::WritableFile::Close(), rocksdb::Comparator::Compare(), rocksdb::Options::compression, rocksdb::Slice::data(), rocksdb::InternalKey::DecodeFrom(), rocksdb::Env::DeleteFile(), rocksdb::Options::disableDataSync, rocksdb::FileMetaData::file_size, rocksdb::TableBuilder::FileSize(), rocksdb::TableBuilder::Finish(), rocksdb::WritableFile::Fsync(), GetInternalKeySeqno(), GetTableBuilder(), rocksdb::Options::info_log, rocksdb::Iterator::key(), kTypeMerge, rocksdb::FileMetaData::largest, rocksdb::FileMetaData::largest_seqno, ripple::max(), rocksdb::Options::merge_operator, ripple::min(), rocksdb::TableCache::NewIterator(), rocksdb::Env::NewWritableFile(), rocksdb::Iterator::Next(), rocksdb::FileMetaData::number, ripple::Resource::ok, rocksdb::Status::ok(), ParseInternalKey(), rocksdb::Options::purge_redundant_kvs_while_flush, rocksdb::Iterator::SeekToFirst(), rocksdb::ParsedInternalKey::sequence, rocksdb::Slice::size(), rocksdb::FileMetaData::smallest, rocksdb::FileMetaData::smallest_seqno, rocksdb::Options::statistics, rocksdb::Iterator::status(), rocksdb::WritableFile::Sync(), TABLE_SYNC_MICROS, TableFileName(), rocksdb::ParsedInternalKey::type, rocksdb::Options::use_fsync, rocksdb::ParsedInternalKey::user_key, rocksdb::Iterator::Valid(), rocksdb::Iterator::value(), and value.

Referenced by rocksdb::DBImpl::WriteLevel0Table(), and rocksdb::DBImpl::WriteLevel0TableForRecovery().

45  {
46  Status s;
47  meta->file_size = 0;
48  meta->smallest_seqno = meta->largest_seqno = 0;
49  iter->SeekToFirst();
50 
51  // If the sequence number of the smallest entry in the memtable is
52  // smaller than the most recent snapshot, then we do not trigger
53  // removal of duplicate/deleted keys as part of this builder.
54  bool purge = options.purge_redundant_kvs_while_flush;
55  if (earliest_seqno_in_memtable <= newest_snapshot) {
56  purge = false;
57  }
58 
59  std::string fname = TableFileName(dbname, meta->number);
60  if (iter->Valid()) {
61  unique_ptr<WritableFile> file;
62  s = env->NewWritableFile(fname, &file, soptions);
63  if (!s.ok()) {
64  return s;
65  }
66 
67  TableBuilder* builder = GetTableBuilder(options, file.get(),
68  options.compression);
69 
70  // the first key is the smallest key
71  Slice key = iter->key();
72  meta->smallest.DecodeFrom(key);
73  meta->smallest_seqno = GetInternalKeySeqno(key);
74  meta->largest_seqno = meta->smallest_seqno;
75 
76  MergeHelper merge(user_comparator, options.merge_operator.get(),
77  options.info_log.get(),
78  true /* internal key corruption is not ok */);
79 
80  if (purge) {
81  // Ugly workaround to avoid compiler error for release build
82  bool ok __attribute__((unused)) = true;
83 
84  // Will write to builder if current key != prev key
85  ParsedInternalKey prev_ikey;
86  std::string prev_key;
87  bool is_first_key = true; // Also write if this is the very first key
88 
89  while (iter->Valid()) {
90  bool iterator_at_next = false;
91 
92  // Get current key
93  ParsedInternalKey this_ikey;
94  Slice key = iter->key();
95  Slice value = iter->value();
96 
97  // In-memory key corruption is not ok;
98  // TODO: find a clean way to treat in memory key corruption
99  ok = ParseInternalKey(key, &this_ikey);
100  assert(ok);
101  assert(this_ikey.sequence >= earliest_seqno_in_memtable);
102 
103  // If the key is the same as the previous key (and it is not the
104  // first key), then we skip it, since it is an older version.
105  // Otherwise we output the key and mark it as the "new" previous key.
106  if (!is_first_key && !user_comparator->Compare(prev_ikey.user_key,
107  this_ikey.user_key)) {
108  // seqno within the same key are in decreasing order
109  assert(this_ikey.sequence < prev_ikey.sequence);
110  } else {
111  is_first_key = false;
112 
113  if (this_ikey.type == kTypeMerge) {
114  // Handle merge-type keys using the MergeHelper
115  merge.MergeUntil(iter, 0 /* don't worry about snapshot */);
116  iterator_at_next = true;
117  if (merge.IsSuccess()) {
118  // Merge completed correctly.
119  // Add the resulting merge key/value and continue to next
120  builder->Add(merge.key(), merge.value());
121  prev_key.assign(merge.key().data(), merge.key().size());
122  ok = ParseInternalKey(Slice(prev_key), &prev_ikey);
123  assert(ok);
124  } else {
125  // Merge did not find a Put/Delete.
126  // Can not compact these merges into a kValueType.
127  // Write them out one-by-one. (Proceed back() to front())
128  const std::deque<std::string>& keys = merge.keys();
129  const std::deque<std::string>& values = merge.values();
130  assert(keys.size() == values.size() && keys.size() >= 1);
131  std::deque<std::string>::const_reverse_iterator key_iter;
132  std::deque<std::string>::const_reverse_iterator value_iter;
133  for (key_iter=keys.rbegin(), value_iter = values.rbegin();
134  key_iter != keys.rend() && value_iter != values.rend();
135  ++key_iter, ++value_iter) {
136 
137  builder->Add(Slice(*key_iter), Slice(*value_iter));
138  }
139 
140  // Sanity check. Both iterators should end at the same time
141  assert(key_iter == keys.rend() && value_iter == values.rend());
142 
143  prev_key.assign(keys.front());
144  ok = ParseInternalKey(Slice(prev_key), &prev_ikey);
145  assert(ok);
146  }
147  } else {
148  // Handle Put/Delete-type keys by simply writing them
149  builder->Add(key, value);
150  prev_key.assign(key.data(), key.size());
151  ok = ParseInternalKey(Slice(prev_key), &prev_ikey);
152  assert(ok);
153  }
154  }
155 
156  if (!iterator_at_next) iter->Next();
157  }
158 
159  // The last key is the largest key
160  meta->largest.DecodeFrom(Slice(prev_key));
161  SequenceNumber seqno = GetInternalKeySeqno(Slice(prev_key));
162  meta->smallest_seqno = std::min(meta->smallest_seqno, seqno);
163  meta->largest_seqno = std::max(meta->largest_seqno, seqno);
164 
165  } else {
166  for (; iter->Valid(); iter->Next()) {
167  Slice key = iter->key();
168  meta->largest.DecodeFrom(key);
169  builder->Add(key, iter->value());
170  SequenceNumber seqno = GetInternalKeySeqno(key);
171  meta->smallest_seqno = std::min(meta->smallest_seqno, seqno);
172  meta->largest_seqno = std::max(meta->largest_seqno, seqno);
173  }
174  }
175 
176  // Finish and check for builder errors
177  if (s.ok()) {
178  s = builder->Finish();
179  if (s.ok()) {
180  meta->file_size = builder->FileSize();
181  assert(meta->file_size > 0);
182  }
183  } else {
184  builder->Abandon();
185  }
186  delete builder;
187 
188  // Finish and check for file errors
189  if (s.ok() && !options.disableDataSync) {
190  if (options.use_fsync) {
191  StopWatch sw(env, options.statistics, TABLE_SYNC_MICROS);
192  s = file->Fsync();
193  } else {
194  StopWatch sw(env, options.statistics, TABLE_SYNC_MICROS);
195  s = file->Sync();
196  }
197  }
198  if (s.ok()) {
199  s = file->Close();
200  }
201 
202  if (s.ok()) {
203  // Verify that the table is usable
204  Iterator* it = table_cache->NewIterator(ReadOptions(),
205  soptions,
206  meta->number,
207  meta->file_size);
208  s = it->status();
209  delete it;
210  }
211  }
212 
213  // Check for input iterator errors
214  if (!iter->status().ok()) {
215  s = iter->status();
216  }
217 
218  if (s.ok() && meta->file_size > 0) {
219  // Keep it
220  } else {
221  env->DeleteFile(fname);
222  }
223  return s;
224 }

Here is the call graph for this function:

Here is the caller graph for this function:

rocksdb::bulk_load_ ( false  )

Referenced by compact_().

Here is the caller graph for this function:

void rocksdb::BumpPerfCount ( uint64_t count,
uint64_t  delta = 1 
)
inline

Definition at line 20 of file perf_context_imp.h.

References kEnableCount, and perf_level.

Referenced by rocksdb::BlockBasedTable::BlockReader(), rocksdb::InternalKeyComparator::Compare(), and ReadBlockContents().

20  {
22  *count += delta;
23  }
24 }

Here is the caller graph for this function:

void rocksdb::BumpPerfTime ( uint64_t time,
StopWatchNano *  timer,
bool  reset = true 
)
inline

Definition at line 26 of file perf_context_imp.h.

References rocksdb::StopWatchNano::ElapsedNanos(), kEnableTime, and perf_level.

Referenced by ReadBlockContents(), and rocksdb::DBImpl::Write().

28  {
30  *time += timer->ElapsedNanos(reset);
31  }
32 }

Here is the call graph for this function:

Here is the caller graph for this function:

static bool rocksdb::BZip2CompressionSupported ( const CompressionOptions &  options)
static

Definition at line 46 of file db_test.cc.

References rocksdb::port::BZip2_Compress(), rocksdb::Slice::data(), Json::in(), and rocksdb::Slice::size().

Referenced by GenerateArgList(), and MinLevelToCompress().

46  {
47  std::string out;
48  Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
49  return port::BZip2_Compress(options, in.data(), in.size(), &out);
50 }

Here is the call graph for this function:

Here is the caller graph for this function:

template<class T , class V >
static void rocksdb::ClipToRange ( T ptr,
minvalue,
maxvalue 
)
static

Definition at line 116 of file db_impl.cc.

Referenced by SanitizeOptions().

116  {
117  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
118  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
119 }

Here is the caller graph for this function:

rocksdb::compact_ ( false  )

Definition at line 399 of file ldb_cmd.cc.

References bulk_load_(), create_if_missing_(), and disable_wal_().

399  {
400 
401  create_if_missing_ = IsFlagPresent(flags, ARG_CREATE_IF_MISSING);
402  disable_wal_ = IsFlagPresent(flags, ARG_DISABLE_WAL);
403  bulk_load_ = IsFlagPresent(flags, ARG_BULK_LOAD);
404  compact_ = IsFlagPresent(flags, ARG_COMPACT);
405 }

Here is the call graph for this function:

static bool rocksdb::CompareIterators ( int  step,
DB *  model,
DB *  db,
const Snapshot *  model_snap,
const Snapshot *  db_snap 
)
static

Definition at line 4542 of file db_test.cc.

References rocksdb::Slice::compare(), EscapeString(), rocksdb::Iterator::key(), rocksdb::DB::NewIterator(), rocksdb::Iterator::Next(), ripple::Resource::ok, rocksdb::Iterator::SeekToFirst(), rocksdb::ReadOptions::snapshot, rocksdb::Iterator::Valid(), and rocksdb::Iterator::value().

Referenced by TEST().

4546  {
4547  ReadOptions options;
4548  options.snapshot = model_snap;
4549  Iterator* miter = model->NewIterator(options);
4550  options.snapshot = db_snap;
4551  Iterator* dbiter = db->NewIterator(options);
4552  bool ok = true;
4553  int count = 0;
4554  for (miter->SeekToFirst(), dbiter->SeekToFirst();
4555  ok && miter->Valid() && dbiter->Valid();
4556  miter->Next(), dbiter->Next()) {
4557  count++;
4558  if (miter->key().compare(dbiter->key()) != 0) {
4559  fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n",
4560  step,
4561  EscapeString(miter->key()).c_str(),
4562  EscapeString(dbiter->key()).c_str());
4563  ok = false;
4564  break;
4565  }
4566 
4567  if (miter->value().compare(dbiter->value()) != 0) {
4568  fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
4569  step,
4570  EscapeString(miter->key()).c_str(),
4571  EscapeString(miter->value()).c_str(),
4572  EscapeString(dbiter->value()).c_str());
4573  ok = false;
4574  }
4575  }
4576 
4577  if (ok) {
4578  if (miter->Valid() != dbiter->Valid()) {
4579  fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
4580  step, miter->Valid(), dbiter->Valid());
4581  ok = false;
4582  }
4583  }
4584  delete miter;
4585  delete dbiter;
4586  return ok;
4587 }

Here is the call graph for this function:

Here is the caller graph for this function:

static bool rocksdb::compareSeqnoDescending ( const VersionSet::Fsize &  first,
const VersionSet::Fsize &  second 
)
static

Definition at line 1765 of file version_set.cc.

References rocksdb::VersionSet::fsize::file, rocksdb::FileMetaData::largest_seqno, and rocksdb::FileMetaData::smallest_seqno.

Referenced by rocksdb::VersionSet::UpdateFilesBySize().

1766  {
1767  if (first.file->smallest_seqno > second.file->smallest_seqno) {
1768  assert(first.file->largest_seqno > second.file->largest_seqno);
1769  return true;
1770  }
1771  assert(first.file->largest_seqno <= second.file->largest_seqno);
1772  return false;
1773 }

Here is the caller graph for this function:

static bool rocksdb::compareSizeDescending ( const VersionSet::Fsize &  first,
const VersionSet::Fsize &  second 
)
static

Definition at line 1759 of file version_set.cc.

References rocksdb::VersionSet::fsize::file, and rocksdb::FileMetaData::file_size.

Referenced by rocksdb::VersionSet::UpdateFilesBySize().

1760  {
1761  return (first.file->file_size > second.file->file_size);
1762 }

Here is the caller graph for this function:

static void rocksdb::ConcurrentReader ( void *  arg)
static

Definition at line 341 of file skiplist_test.cc.

References rocksdb::port::AtomicPointer::Acquire_Load(), arg, rocksdb::TestState::Change(), rocksdb::TestState::DONE, rocksdb::TestState::quit_flag_, rocksdb::ConcurrentTest::ReadStep(), rocksdb::TestState::RUNNING, rocksdb::TestState::seed_, and rocksdb::TestState::t_.

Referenced by RunConcurrent().

341  {
342  TestState* state = reinterpret_cast<TestState*>(arg);
343  Random rnd(state->seed_);
344  int64_t reads = 0;
345  state->Change(TestState::RUNNING);
346  while (!state->quit_flag_.Acquire_Load()) {
347  state->t_.ReadStep(&rnd);
348  ++reads;
349  }
350  state->Change(TestState::DONE);
351 }

Here is the call graph for this function:

Here is the caller graph for this function:

bool rocksdb::ConsumeChar ( Slice *  in,
char  c 
)

Definition at line 53 of file logging.cc.

References rocksdb::Slice::empty(), and rocksdb::Slice::remove_prefix().

53  {
54  if (!in->empty() && (*in)[0] == c) {
55  in->remove_prefix(1);
56  return true;
57  } else {
58  return false;
59  }
60 }

Here is the call graph for this function:

bool rocksdb::ConsumeDecimalNumber ( Slice *  in,
uint64_t val 
)

Definition at line 62 of file logging.cc.

References rocksdb::Slice::empty(), and rocksdb::Slice::remove_prefix().

Referenced by rocksdb::CorruptionTest::Check(), rocksdb::DBImpl::GetProperty(), and ParseFileName().

62  {
63  uint64_t v = 0;
64  int digits = 0;
65  while (!in->empty()) {
66  char c = (*in)[0];
67  if (c >= '0' && c <= '9') {
68  ++digits;
69  const unsigned int delta = (c - '0');
70  static const uint64_t kMaxUint64 = ~static_cast<uint64_t>(0);
71  if (v > kMaxUint64/10 ||
72  (v == kMaxUint64/10 && delta > kMaxUint64%10)) {
73  // Overflow
74  return false;
75  }
76  v = (v * 10) + delta;
77  in->remove_prefix(1);
78  } else {
79  break;
80  }
81  }
82  *val = v;
83  return (digits > 0);
84 }

Here is the call graph for this function:

Here is the caller graph for this function:

rocksdb::count_delim_ ( false  )

Referenced by is_input_key_hex_(), and print_stats_().

Here is the caller graph for this function:

rocksdb::count_only_ ( false  )

Referenced by is_input_key_hex_(), and print_stats_().

Here is the caller graph for this function:

rocksdb::create_if_missing_ ( false  )

Referenced by compact_().

Here is the caller graph for this function:

std::shared_ptr< Statistics > rocksdb::CreateDBStatistics ( )

Definition at line 61 of file db_statistics.h.

Referenced by main(), and TEST().

61  {
62  return std::make_shared<DBStatistics>();
63 }

Here is the caller graph for this function:

Status rocksdb::CreateLoggerFromOptions ( const std::string &  dbname,
const std::string &  db_log_dir,
Env *  env,
const Options &  options,
std::shared_ptr< Logger > *  logger 
)

Definition at line 76 of file auto_roll_logger.cc.

References rocksdb::Env::CreateDir(), rocksdb::Env::GetAbsolutePath(), rocksdb::AutoRollLogger::GetStatus(), InfoLogFileName(), rocksdb::Options::log_file_time_to_roll, rocksdb::Options::max_log_file_size, rocksdb::Env::NewLogger(), rocksdb::Env::NowMicros(), rocksdb::Status::ok(), OldInfoLogFileName(), and rocksdb::Env::RenameFile().

Referenced by SanitizeOptions(), and TEST().

81  {
82  std::string db_absolute_path;
83  env->GetAbsolutePath(dbname, &db_absolute_path);
84  std::string fname = InfoLogFileName(dbname, db_absolute_path, db_log_dir);
85 
86  // Currently we only support roll by time-to-roll and log size
87  if (options.log_file_time_to_roll > 0 || options.max_log_file_size > 0) {
88  AutoRollLogger* result = new AutoRollLogger(
89  env, dbname, db_log_dir,
90  options.max_log_file_size,
91  options.log_file_time_to_roll);
92  Status s = result->GetStatus();
93  if (!s.ok()) {
94  delete result;
95  } else {
96  logger->reset(result);
97  }
98  return s;
99  } else {
100  // Open a log file in the same directory as the db
101  env->CreateDir(dbname); // In case it does not exist
102  env->RenameFile(fname, OldInfoLogFileName(dbname, env->NowMicros(),
103  db_absolute_path, db_log_dir));
104  return env->NewLogger(fname, logger);
105  }
106 }

Here is the call graph for this function:

Here is the caller graph for this function:

std::string rocksdb::CurrentFileName ( const std::string &  dbname)

Definition at line 86 of file filename.cc.

Referenced by rocksdb::DBImpl::GetLiveFiles(), rocksdb::DBImpl::Recover(), rocksdb::VersionSet::Recover(), SetCurrentFile(), and TEST().

86  {
87  return dbname + "/CURRENT";
88 }

Here is the caller graph for this function:

static const char* rocksdb::DecodeEntry ( const char *  p,
const char *  limit,
uint32_t shared,
uint32_t non_shared,
uint32_t value_length 
)
inlinestatic

Definition at line 59 of file block.cc.

References GetVarint32Ptr().

Referenced by rocksdb::Block::Iter::ParseNextKey(), and rocksdb::Block::Iter::Seek().

62  {
63  if (limit - p < 3) return nullptr;
64  *shared = reinterpret_cast<const unsigned char*>(p)[0];
65  *non_shared = reinterpret_cast<const unsigned char*>(p)[1];
66  *value_length = reinterpret_cast<const unsigned char*>(p)[2];
67  if ((*shared | *non_shared | *value_length) < 128) {
68  // Fast path: all three values are encoded in one byte each
69  p += 3;
70  } else {
71  if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr;
72  if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr;
73  if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) return nullptr;
74  }
75 
76  if (static_cast<uint32_t>(limit - p) < (*non_shared + *value_length)) {
77  return nullptr;
78  }
79  return p;
80 }

Here is the call graph for this function:

Here is the caller graph for this function:

uint32_t rocksdb::DecodeFixed32 ( const char *  ptr)
inline

Definition at line 67 of file coding.h.

References rocksdb::port::kLittleEndian.

Referenced by rocksdb::Blob::Blob(), rocksdb::WriteBatchInternal::Count(), DecodeFixed64(), rocksdb::Footer::DecodeFrom(), DecodeKey(), rocksdb::FilterBlockReader::FilterBlockReader(), rocksdb::Block::Iter::GetRestartPoint(), Hash(), rocksdb::DBWithTTL::IsStale(), rocksdb::TestHashFilter::KeyMayMatch(), rocksdb::crc32c::LE_LOAD32(), rocksdb::FilterBlockReader::MayMatch(), rocksdb::RedisListIterator::MoveNext(), rocksdb::SimpleTableIterator::Next(), rocksdb::Block::NumRestarts(), ReadBlockContents(), rocksdb::log::Reader::ReadPhysicalRecord(), rocksdb::RedisListIterator::RedisListIterator(), rocksdb::DBWithTTL::SanityCheckTimestamp(), TEST(), and rocksdb::TtlIterator::timestamp().

67  {
68  if (port::kLittleEndian) {
69  // Load the raw bytes
70  uint32_t result;
71  memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load
72  return result;
73  } else {
74  return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0])))
75  | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8)
76  | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16)
77  | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
78  }
79 }

Here is the caller graph for this function:

uint64_t rocksdb::DecodeFixed64 ( const char *  ptr)
inline

Definition at line 81 of file coding.h.

References DecodeFixed32(), and rocksdb::port::kLittleEndian.

Referenced by rocksdb::InternalKeyComparator::Compare(), dumpDb(), ExtractValueType(), rocksdb::MemTable::Get(), Counters::get(), GetFileIterator(), GetInternalKeySeqno(), rocksdb::SimpleTableReader::GetOffset(), rocksdb::crc32c::LE_LOAD64(), rocksdb::SimpleTableReader::Open(), ParseInternalKey(), rocksdb::Version::PrefixMayMatch(), rocksdb::WriteBatchInternal::Sequence(), TEST(), and rocksdb::MemTable::Update().

81  {
82  if (port::kLittleEndian) {
83  // Load the raw bytes
84  uint64_t result;
85  memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load
86  return result;
87  } else {
88  uint64_t lo = DecodeFixed32(ptr);
89  uint64_t hi = DecodeFixed32(ptr + 4);
90  return (hi << 32) | lo;
91  }
92 }

Here is the call graph for this function:

Here is the caller graph for this function:

static int rocksdb::DecodeKey ( const Slice &  k)
static

Definition at line 26 of file cache_test.cc.

References rocksdb::Slice::data(), DecodeFixed32(), and rocksdb::Slice::size().

Referenced by rocksdb::CacheTest::Deleter().

26  {
27  assert(k.size() == 4);
28  return DecodeFixed32(k.data());
29 }

Here is the call graph for this function:

Here is the caller graph for this function:

static int rocksdb::DecodeValue ( void *  v)
static

Definition at line 31 of file cache_test.cc.

Referenced by rocksdb::CacheTest::Deleter(), rocksdb::CacheTest::Lookup(), and TEST().

31 { return reinterpret_cast<uintptr_t>(v); }

Here is the caller graph for this function:

static void rocksdb::DeleteCachedBlock ( const Slice &  key,
void *  value 
)
static

Definition at line 308 of file block_based_table_builder.cc.

References value.

Referenced by rocksdb::BlockBasedTable::BlockReader(), rocksdb::BlockBasedTable::GetBlock(), and rocksdb::BlockBasedTableBuilder::InsertBlockInCache().

308  {
309  Block* block = reinterpret_cast<Block*>(value);
310  delete block;
311 }

Here is the caller graph for this function:

static void rocksdb::DeleteEntry ( const Slice &  key,
void *  value 
)
static

Definition at line 21 of file table_cache.cc.

References value.

Referenced by rocksdb::TableCache::FindTable().

21  {
22  TableReader* table_reader = reinterpret_cast<TableReader*>(value);
23  delete table_reader;
24 }

Here is the caller graph for this function:

void rocksdb::deleter ( const Slice &  key,
void *  value 
)

Definition at line 352 of file cache_test.cc.

Referenced by TEST().

352  {
353  delete (Value *)value;
354 }

Here is the caller graph for this function:

rocksdb::delim_ ( "."  )

Referenced by is_input_key_hex_(), and print_stats_().

Here is the caller graph for this function:

std::string rocksdb::DescriptorFileName ( const std::string &  dbname,
uint64_t  number 
)

Definition at line 78 of file filename.cc.

Referenced by rocksdb::DBImpl::GetLiveFiles(), rocksdb::VersionSet::LogAndApply(), rocksdb::VersionSet::ManifestContains(), rocksdb::DBImpl::NewDB(), SetCurrentFile(), and TEST().

78  {
79  assert(number > 0);
80  char buf[100];
81  snprintf(buf, sizeof(buf), "/MANIFEST-%06llu",
82  static_cast<unsigned long long>(number));
83  return dbname + buf;
84 }

Here is the caller graph for this function:

Status rocksdb::DestroyDB ( const std::string &  dbname,
const Options &  options 
)

Definition at line 3524 of file db_impl.cc.

References ArchivalDirectory(), comparator, rocksdb::Options::comparator, rocksdb::Env::DeleteDir(), rocksdb::Env::DeleteFile(), env, rocksdb::Options::env, rocksdb::Options::filter_policy, rocksdb::Env::GetChildren(), kDBLockFile, kLogFile, kMetaDatabase, rocksdb::Env::LockFile(), LockFileName(), rocksdb::Status::OK(), rocksdb::Status::ok(), ParseFileName(), SanitizeOptions(), rocksdb::Env::UnlockFile(), and rocksdb::Options::wal_dir.

Referenced by rocksdb::Benchmark::Benchmark(), BM_LogAndApply(), rocksdb::CorruptionTest::CorruptionTest(), rocksdb::DBTest::DBTest(), rocksdb::DeleteFileTest::DeleteFileTest(), rocksdb::DBTest::Destroy(), rocksdb::SimpleTableDBTest::Destroy(), leveldb_destroy_db(), main(), rocksdb::DBConstructor::NewDB(), OpenDb(), ProfileKeyComparison(), rocksdb::RedisLists::RedisLists(), rocksdb::ReduceLevelTest::ReduceLevelTest(), rocksdb::DeleteFileTest::ReopenDB(), rocksdb::Benchmark::Run(), runTest(), rocksdb::SimpleTableDBTest::SimpleTableDBTest(), rocksdb::StressTest::StressTest(), rocksdb::StringAppendOperatorTest::StringAppendOperatorTest(), TableReaderBenchmark(), TEST(), rocksdb::TtlTest::TtlTest(), rocksdb::CorruptionTest::~CorruptionTest(), rocksdb::DBTest::~DBTest(), rocksdb::SimpleTableDBTest::~SimpleTableDBTest(), and rocksdb::TtlTest::~TtlTest().

3524  {
3525  const InternalKeyComparator comparator(options.comparator);
3526  const InternalFilterPolicy filter_policy(options.filter_policy);
3527  const Options& soptions(SanitizeOptions(
3528  dbname, &comparator, &filter_policy, options));
3529  Env* env = soptions.env;
3530  std::vector<std::string> filenames;
3531  std::vector<std::string> archiveFiles;
3532 
3533  std::string archivedir = ArchivalDirectory(dbname);
3534  // Ignore error in case directory does not exist
3535  env->GetChildren(dbname, &filenames);
3536 
3537  if (dbname != soptions.wal_dir) {
3538  std::vector<std::string> logfilenames;
3539  env->GetChildren(soptions.wal_dir, &logfilenames);
3540  filenames.insert(filenames.end(), logfilenames.begin(), logfilenames.end());
3541  archivedir = ArchivalDirectory(soptions.wal_dir);
3542  }
3543 
3544  if (filenames.empty()) {
3545  return Status::OK();
3546  }
3547 
3548  FileLock* lock;
3549  const std::string lockname = LockFileName(dbname);
3550  Status result = env->LockFile(lockname, &lock);
3551  if (result.ok()) {
3552  uint64_t number;
3553  FileType type;
3554  for (size_t i = 0; i < filenames.size(); i++) {
3555  if (ParseFileName(filenames[i], &number, &type) &&
3556  type != kDBLockFile) { // Lock file will be deleted at end
3557  Status del;
3558  if (type == kMetaDatabase) {
3559  del = DestroyDB(dbname + "/" + filenames[i], options);
3560  } else if (type == kLogFile) {
3561  del = env->DeleteFile(soptions.wal_dir + "/" + filenames[i]);
3562  } else {
3563  del = env->DeleteFile(dbname + "/" + filenames[i]);
3564  }
3565  if (result.ok() && !del.ok()) {
3566  result = del;
3567  }
3568  }
3569  }
3570 
3571  env->GetChildren(archivedir, &archiveFiles);
3572  // Delete archival files.
3573  for (size_t i = 0; i < archiveFiles.size(); ++i) {
3574  if (ParseFileName(archiveFiles[i], &number, &type) &&
3575  type == kLogFile) {
3576  Status del = env->DeleteFile(archivedir + "/" + archiveFiles[i]);
3577  if (result.ok() && !del.ok()) {
3578  result = del;
3579  }
3580  }
3581  }
3582  // ignore case where no archival directory is present.
3583  env->DeleteDir(archivedir);
3584 
3585  env->UnlockFile(lock); // Ignore error since state is already gone
3586  env->DeleteFile(lockname);
3587  env->DeleteDir(dbname); // Ignore error in case dir contains other files
3588  env->DeleteDir(soptions.wal_dir);
3589  }
3590  return result;
3591 }

Here is the call graph for this function:

Here is the caller graph for this function:

rocksdb::disable_wal_ ( false  )

Referenced by compact_().

Here is the caller graph for this function:

static void rocksdb::Do_Compression_Test ( CompressionType  comp)
static

Definition at line 1136 of file table_test.cc.

References rocksdb::Constructor::Add(), rocksdb::BlockBasedTableConstructor::ApproximateOffsetOf(), ASSERT_TRUE, Between(), rocksdb::Options::block_size, BytewiseComparator(), rocksdb::test::CompressibleString(), rocksdb::Options::compression, and rocksdb::Constructor::Finish().

Referenced by TEST().

1136  {
1137  Random rnd(301);
1138  BlockBasedTableConstructor c(BytewiseComparator());
1139  std::string tmp;
1140  c.Add("k01", "hello");
1141  c.Add("k02", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
1142  c.Add("k03", "hello3");
1143  c.Add("k04", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
1144  std::vector<std::string> keys;
1145  KVMap kvmap;
1146  Options options;
1147  options.block_size = 1024;
1148  options.compression = comp;
1149  c.Finish(options, &keys, &kvmap);
1150 
1151  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
1152  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
1153  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
1154  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 2000, 3000));
1155  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 2000, 3000));
1156  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 4000, 6000));
1157 }

Here is the call graph for this function:

Here is the caller graph for this function:

static Status rocksdb::DoWriteStringToFile ( Env *  env,
const Slice &  data,
const std::string &  fname,
bool  should_sync 
)
static

Definition at line 63 of file env.cc.

References rocksdb::WritableFile::Append(), rocksdb::Env::DeleteFile(), rocksdb::Env::NewWritableFile(), rocksdb::Status::ok(), and rocksdb::WritableFile::Sync().

Referenced by WriteStringToFile(), and WriteStringToFileSync().

65  {
66  unique_ptr<WritableFile> file;
67  EnvOptions soptions;
68  Status s = env->NewWritableFile(fname, &file, soptions);
69  if (!s.ok()) {
70  return s;
71  }
72  s = file->Append(data);
73  if (s.ok() && should_sync) {
74  s = file->Sync();
75  }
76  if (!s.ok()) {
77  env->DeleteFile(fname);
78  }
79  return s;
80 }

Here is the call graph for this function:

Here is the caller graph for this function:

static bool rocksdb::DummySaveValue ( void *  arg,
const Slice &  ikey,
const Slice &  v,
bool  didIO 
)
static

Definition at line 36 of file table_reader_bench.cc.

Referenced by TableReaderBenchmark().

37  {
38  return false;
39 }

Here is the caller graph for this function:

void rocksdb::dumpLeveldbBuildVersion ( Logger *  log)

Referenced by rocksdb::DBImpl::DBImpl().

Here is the caller graph for this function:

void rocksdb::EncodeFixed32 ( char *  buf,
uint32_t  value 
)

Definition at line 16 of file coding.cc.

Referenced by rocksdb::DBWithTTL::AppendTS(), rocksdb::log::Writer::EmitPhysicalRecord(), rocksdb::log::LogTest::FixChecksum(), rocksdb::TtlMergeOperator::FullMerge(), rocksdb::RedisListIterator::InsertElement(), rocksdb::TtlMergeOperator::PartialMerge(), PutFixed32(), rocksdb::WriteBatchInternal::SetCount(), rocksdb::BlockBasedTableBuilder::WriteRawBlock(), and rocksdb::RedisListIterator::WriteResult().

16  {
17 #if __BYTE_ORDER == __LITTLE_ENDIAN
18  memcpy(buf, &value, sizeof(value));
19 #else
20  buf[0] = value & 0xff;
21  buf[1] = (value >> 8) & 0xff;
22  buf[2] = (value >> 16) & 0xff;
23  buf[3] = (value >> 24) & 0xff;
24 #endif
25 }

Here is the caller graph for this function:

void rocksdb::EncodeFixed64 ( char *  buf,
uint64_t  value 
)

Definition at line 27 of file coding.cc.

Referenced by rocksdb::MemTable::Add(), MergeBasedCounters::add(), rocksdb::TableCache::Evict(), rocksdb::TableCache::FindTable(), rocksdb::LookupKey::LookupKey(), PutFixed64(), rocksdb::WriteBatchInternal::SetSequence(), UpdateInternalKey(), and rocksdb::Version::LevelFileNumIterator::value().

27  {
28 #if __BYTE_ORDER == __LITTLE_ENDIAN
29  memcpy(buf, &value, sizeof(value));
30 #else
31  buf[0] = value & 0xff;
32  buf[1] = (value >> 8) & 0xff;
33  buf[2] = (value >> 16) & 0xff;
34  buf[3] = (value >> 24) & 0xff;
35  buf[4] = (value >> 32) & 0xff;
36  buf[5] = (value >> 40) & 0xff;
37  buf[6] = (value >> 48) & 0xff;
38  buf[7] = (value >> 56) & 0xff;
39 #endif
40 }

Here is the caller graph for this function:

static std::string rocksdb::EncodeKey ( int  k)
static

Definition at line 21 of file cache_test.cc.

References PutFixed32().

21  {
22  std::string result;
23  PutFixed32(&result, k);
24  return result;
25 }

Here is the call graph for this function:

static const char* rocksdb::EncodeKey ( std::string *  scratch,
const Slice &  target 
)
static

Definition at line 81 of file memtable.cc.

References rocksdb::Slice::data(), PutVarint32(), and rocksdb::Slice::size().

Referenced by rocksdb::CacheTest::Erase(), rocksdb::CacheTest::Insert(), rocksdb::CacheTest::Lookup(), rocksdb::MemTableIterator::Seek(), and TEST().

81  {
82  scratch->clear();
83  PutVarint32(scratch, target.size());
84  scratch->append(target.data(), target.size());
85  return scratch->data();
86 }

Here is the call graph for this function:

Here is the caller graph for this function:

static void* rocksdb::EncodeValue ( uintptr_t  v)
static

Definition at line 30 of file cache_test.cc.

Referenced by rocksdb::CacheTest::Insert().

30 { return reinterpret_cast<void*>(v); }

Here is the caller graph for this function:

char * rocksdb::EncodeVarint32 ( char *  dst,
uint32_t  v 
)

Definition at line 54 of file coding.cc.

Referenced by rocksdb::MemTable::Add(), rocksdb::LookupKey::LookupKey(), PutVarint32(), and rocksdb::MemTable::Update().

54  {
55  // Operate on characters as unsigneds
56  unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
57  static const int B = 128;
58  if (v < (1<<7)) {
59  *(ptr++) = v;
60  } else if (v < (1<<14)) {
61  *(ptr++) = v | B;
62  *(ptr++) = v>>7;
63  } else if (v < (1<<21)) {
64  *(ptr++) = v | B;
65  *(ptr++) = (v>>7) | B;
66  *(ptr++) = v>>14;
67  } else if (v < (1<<28)) {
68  *(ptr++) = v | B;
69  *(ptr++) = (v>>7) | B;
70  *(ptr++) = (v>>14) | B;
71  *(ptr++) = v>>21;
72  } else {
73  *(ptr++) = v | B;
74  *(ptr++) = (v>>7) | B;
75  *(ptr++) = (v>>14) | B;
76  *(ptr++) = (v>>21) | B;
77  *(ptr++) = v>>28;
78  }
79  return reinterpret_cast<char*>(ptr);
80 }

Here is the caller graph for this function:

char * rocksdb::EncodeVarint64 ( char *  dst,
uint64_t  v 
)

Definition at line 88 of file coding.cc.

Referenced by rocksdb::BlockBasedTable::GenerateCachePrefix(), rocksdb::StringSource::GetUniqueId(), rocksdb::BlockBasedTableBuilder::InsertBlockInCache(), and PutVarint64().

88  {
89  static const unsigned int B = 128;
90  unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
91  while (v >= B) {
92  *(ptr++) = (v & (B-1)) | B;
93  v >>= 7;
94  }
95  *(ptr++) = static_cast<unsigned char>(v);
96  return reinterpret_cast<char*>(ptr);
97 }

Here is the caller graph for this function:

rocksdb::end_key_specified_ ( false  )

Referenced by max_keys_scanned_().

Here is the caller graph for this function:

std::string rocksdb::EscapeString ( const Slice &  value)

Definition at line 47 of file logging.cc.

References AppendEscapedStringTo().

Referenced by CompareIterators(), rocksdb::InternalKey::DebugString(), TEST(), and rocksdb::Harness::TestRandomAccess().

47  {
48  std::string r;
50  return r;
51 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::ExpectRecords ( const int  expected_no_records,
std::unique_ptr< TransactionLogIterator > &  iter 
)

Definition at line 4011 of file db_test.cc.

References ASSERT_EQ, and ReadRecords().

Referenced by TEST().

4013  {
4014  int num_records;
4015  ReadRecords(iter, num_records);
4016  ASSERT_EQ(num_records, expected_no_records);
4017 }

Here is the call graph for this function:

Here is the caller graph for this function:

Slice rocksdb::ExtractUserKey ( const Slice &  internal_key)
inline
ValueType rocksdb::ExtractValueType ( const Slice &  internal_key)
inline

Definition at line 80 of file dbformat.h.

References rocksdb::Slice::data(), DecodeFixed64(), and rocksdb::Slice::size().

80  {
81  assert(internal_key.size() >= 8);
82  const size_t n = internal_key.size();
83  uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
84  unsigned char c = num & 0xff;
85  return static_cast<ValueType>(c);
86 }

Here is the call graph for this function:

int rocksdb::FindFile ( const InternalKeyComparator &  icmp,
const std::vector< FileMetaData * > &  files,
const Slice &  key 
)

Definition at line 60 of file version_set.cc.

References rocksdb::InternalKey::Encode(), ripple::f, and rocksdb::FileMetaData::largest.

Referenced by rocksdb::FindFileTest::Find(), rocksdb::Version::Get(), rocksdb::Version::HasOverlappingUserKey(), rocksdb::Version::LevelFileNumIterator::Seek(), and SomeFileOverlapsRange().

62  {
63  uint32_t left = 0;
64  uint32_t right = files.size();
65  while (left < right) {
66  uint32_t mid = (left + right) / 2;
67  const FileMetaData* f = files[mid];
68  if (icmp.InternalKeyComparator::Compare(f->largest.Encode(), key) < 0) {
69  // Key at "mid.largest" is < "target". Therefore all
70  // files at or before "mid" are uninteresting.
71  left = mid + 1;
72  } else {
73  // Key at "mid.largest" is >= "target". Therefore all files
74  // after "mid" are uninteresting.
75  right = mid;
76  }
77  }
78  return right;
79 }

Here is the call graph for this function:

Here is the caller graph for this function:

static int rocksdb::FlattenPath ( const std::string &  path,
char *  dest,
int  len 
)
static

Definition at line 23 of file filename.cc.

Referenced by InfoLogFileName(), and OldInfoLogFileName().

23  {
24  int write_idx = 0;
25  int i = 0;
26  int src_len = path.size();
27 
28  while (i < src_len && write_idx < len - 1) {
29  if ((path[i] >= 'a' && path[i] <= 'z') ||
30  (path[i] >= '0' && path[i] <= '9') ||
31  (path[i] >= 'A' && path[i] <= 'Z') ||
32  path[i] == '-' ||
33  path[i] == '.' ||
34  path[i] == '_'){
35  dest[write_idx++] = path[i];
36  } else {
37  if (i > 0)
38  dest[write_idx++] = '_';
39  }
40  i++;
41  }
42 
43  dest[write_idx] = '\0';
44  return write_idx;
45 }

Here is the caller graph for this function:

static std::vector<TestArgs> rocksdb::GenerateArgList ( )
static

Definition at line 493 of file table_test.cc.

References BLOCK_TEST, BZip2CompressionSupported(), rocksdb::TestArgs::compression, DB_TEST, kBZip2Compression, kNoCompression, kSnappyCompression, kZlibCompression, MEMTABLE_TEST, rocksdb::TestArgs::restart_interval, rocksdb::TestArgs::reverse_compare, SnappyCompressionSupported(), TABLE_TEST, rocksdb::TestArgs::type, and ZlibCompressionSupported().

Referenced by TEST().

493  {
494  std::vector<TestArgs> ret;
495  TestType test_type[4] = {TABLE_TEST, BLOCK_TEST, MEMTABLE_TEST, DB_TEST};
496  int test_type_len = 4;
497  bool reverse_compare[2] = {false, true};
498  int reverse_compare_len = 2;
499  int restart_interval[3] = {16, 1, 1024};
500  int restart_interval_len = 3;
501 
502  // Only add compression if it is supported
503  std::vector<CompressionType> compression_types;
504  compression_types.push_back(kNoCompression);
505 #ifdef SNAPPY
507  compression_types.push_back(kSnappyCompression);
508 #endif
509 
510 #ifdef ZLIB
512  compression_types.push_back(kZlibCompression);
513 #endif
514 
515 #ifdef BZIP2
517  compression_types.push_back(kBZip2Compression);
518 #endif
519 
520  for(int i =0; i < test_type_len; i++)
521  for (int j =0; j < reverse_compare_len; j++)
522  for (int k =0; k < restart_interval_len; k++)
523  for (unsigned int n =0; n < compression_types.size(); n++) {
524  TestArgs one_arg;
525  one_arg.type = test_type[i];
526  one_arg.reverse_compare = reverse_compare[j];
527  one_arg.restart_interval = restart_interval[k];
528  one_arg.compression = compression_types[n];
529  ret.push_back(one_arg);
530  }
531 
532  return ret;
533 }

Here is the call graph for this function:

Here is the caller graph for this function:

CompressionType rocksdb::GetCompressionType ( const Options &  options,
int  level,
const bool  enable_compression 
)

Definition at line 205 of file db_impl.cc.

References rocksdb::Options::compression, rocksdb::Options::compression_per_level, kNoCompression, ripple::max(), and ripple::min().

Referenced by rocksdb::DBImpl::OpenCompactionOutputFile().

// Choose the compression type for a compaction output file at 'level'.
206  {
207  if (!enable_compression) {
208  // disable compression
209  return kNoCompression;
210  }
211  // If the user has specified a different compression level for each level,
212  // then pick the compression for that level.
213  if (!options.compression_per_level.empty()) {
214  const int n = options.compression_per_level.size() - 1;
215  // It is possible for level_ to be -1; in that case, we use level
216  // 0's compression. This occurs mostly in backwards compatibility
217  // situations when the builder doesn't know what level the file
218  // belongs to. Likewise, if level_ is beyond the end of the
219  // specified compression levels, use the last value.
220  return options.compression_per_level[std::max(0, std::min(level, n))];
221  } else {
222  return options.compression;
223  }
224 }

Here is the call graph for this function:

Here is the caller graph for this function:

uint64_t rocksdb::GetDeletedKeys ( const TableProperties::UserCollectedProperties &  props)

Definition at line 153 of file table_properties_collector.cc.

References GetVarint64(), and rocksdb::InternalKeyTablePropertiesNames::kDeletedKeys.

Referenced by TEST().

154  {
155  auto pos = props.find(InternalKeyTablePropertiesNames::kDeletedKeys);
156  if (pos == props.end()) {
157  return 0;
158  }
159  Slice raw = pos->second;
160  uint64_t val = 0;
161  return GetVarint64(&raw, &val) ? val : 0;
162 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::GetFileCreateTime ( const std::string &  fname,
uint64_t file_ctime 
)

Definition at line 56 of file auto_roll_logger_test.cc.

Referenced by rocksdb::AutoRollLoggerTest::RollLogFileByTimeTest().

// Store the creation time (st_ctime) of 'fname' into '*file_ctime'.
// On stat() failure the output is 0. Bug fix: the original fell
// through after the error branch and copied st_ctime out of the
// UNINITIALIZED 'struct stat', overwriting the 0 with garbage.
void GetFileCreateTime(const std::string& fname, uint64_t* file_ctime) {
  struct stat s;
  if (stat(fname.c_str(), &s) != 0) {
    *file_ctime = (uint64_t)0;
    return;  // do not read the uninitialized stat buffer below
  }
  *file_ctime = static_cast<uint64_t>(s.st_ctime);
}

Here is the caller graph for this function:

static Iterator* rocksdb::GetFileIterator ( void *  arg,
const ReadOptions &  options,
const EnvOptions &  soptions,
const Slice &  file_value,
bool  for_compaction 
)
static

Definition at line 187 of file version_set.cc.

References arg, rocksdb::Status::Corruption(), rocksdb::Slice::data(), DecodeFixed64(), NewErrorIterator(), rocksdb::TableCache::NewIterator(), rocksdb::ReadOptions::prefix, and rocksdb::Slice::size().

Referenced by rocksdb::VersionSet::MakeInputIterator(), and rocksdb::Version::NewConcatenatingIterator().

// TwoLevelIterator callback: 'arg' is the TableCache, and 'file_value'
// must be two fixed64s (file number, file size). Returns an error
// iterator for malformed input instead of crashing.
191  {
192  TableCache* cache = reinterpret_cast<TableCache*>(arg);
193  if (file_value.size() != 16) {
194  return NewErrorIterator(
195  Status::Corruption("FileReader invoked with unexpected value"));
196  } else {
197  ReadOptions options_copy;
198  if (options.prefix) {
199  // suppress prefix filtering since we have already checked the
200  // filters once at this point
201  options_copy = options;
202  options_copy.prefix = nullptr;
203  }
204  return cache->NewIterator(options.prefix ? options_copy : options,
205  soptions,
206  DecodeFixed64(file_value.data()),
207  DecodeFixed64(file_value.data() + 8),
208  nullptr /* don't need reference to table*/,
209  for_compaction);
210  }
211 }

Here is the call graph for this function:

Here is the caller graph for this function:

static bool rocksdb::GetInternalKey ( Slice *  input,
InternalKey *  dst 
)
static

Definition at line 98 of file version_edit.cc.

References rocksdb::InternalKey::DecodeFrom(), and GetLengthPrefixedSlice().

Referenced by rocksdb::VersionEdit::DecodeFrom().

98  {
99  Slice str;
100  if (GetLengthPrefixedSlice(input, &str)) {
101  dst->DecodeFrom(str);
102  return true;
103  } else {
104  return false;
105  }
106 }

Here is the call graph for this function:

Here is the caller graph for this function:

uint64_t rocksdb::GetInternalKeySeqno ( const Slice &  internal_key)
inline

Definition at line 181 of file dbformat.h.

References rocksdb::Slice::data(), DecodeFixed64(), and rocksdb::Slice::size().

Referenced by BuildTable(), and rocksdb::DBImpl::DoCompactionWork().

181  {
182  const size_t n = internal_key.size();
183  assert(n >= 8);
184  uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
185  return num >> 8;
186 }

Here is the call graph for this function:

Here is the caller graph for this function:

const char* rocksdb::GetLengthPrefixedSlice ( const char *  p,
const char *  limit,
Slice *  result 
)

Definition at line 191 of file coding.cc.

References GetVarint32Ptr().

Referenced by rocksdb::VersionEdit::DecodeFrom(), rocksdb::MemTable::Get(), GetInternalKey(), rocksdb::WriteBatch::Iterate(), rocksdb::MemTableIterator::key(), rocksdb::MemTable::KeyComparator::operator()(), rocksdb::stl_wrappers::Hash::operator()(), TEST(), rocksdb::MemTableRep::UserKey(), and rocksdb::MemTableIterator::value().

192  {
193  uint32_t len;
194  p = GetVarint32Ptr(p, limit, &len);
195  if (p == nullptr) return nullptr;
196  if (p + len > limit) return nullptr;
197  *result = Slice(p, len);
198  return p + len;
199 }

Here is the call graph for this function:

Here is the caller graph for this function:

bool rocksdb::GetLengthPrefixedSlice ( Slice *  input,
Slice *  result 
)

Definition at line 201 of file coding.cc.

References rocksdb::Slice::data(), GetVarint32(), rocksdb::Slice::remove_prefix(), and rocksdb::Slice::size().

201  {
202  uint32_t len;
203  if (GetVarint32(input, &len) &&
204  input->size() >= len) {
205  *result = Slice(input->data(), len);
206  input->remove_prefix(len);
207  return true;
208  } else {
209  return false;
210  }
211 }

Here is the call graph for this function:

Slice rocksdb::GetLengthPrefixedSlice ( const char *  data)

Definition at line 213 of file coding.cc.

References GetVarint32Ptr().

// Trusted-input variant: decodes the varint32 length at 'data' and
// returns a Slice over the payload.
213  {
214  uint32_t len;
215  const char* p = data;
216  p = GetVarint32Ptr(p, p + 5, &len); // +5: we assume "p" is not corrupted
// NOTE(review): if the varint is malformed, GetVarint32Ptr returns
// nullptr and the Slice below is built from a null pointer — callers
// must guarantee well-formed input (e.g. data written by PutLengthPrefixedSlice).
217  return Slice(p, len);
218 }

Here is the call graph for this function:

uint64_t rocksdb::GetLogDirSize ( std::string  dir_path,
SpecialEnv *  env 
)

Definition at line 3934 of file db_test.cc.

References ripple::f, rocksdb::EnvWrapper::GetChildren(), rocksdb::EnvWrapper::GetFileSize(), kLogFile, and ParseFileName().

Referenced by TEST().

3934  {
3935  uint64_t dir_size = 0;
3936  std::vector<std::string> files;
3937  env->GetChildren(dir_path, &files);
3938  for (auto& f : files) {
3939  uint64_t number;
3940  FileType type;
3941  if (ParseFileName(f, &number, &type) && type == kLogFile) {
3942  std::string const file_path = dir_path + "/" + f;
3943  uint64_t file_size;
3944  env->GetFileSize(file_path, &file_size);
3945  dir_size += file_size;
3946  }
3947  }
3948  return dir_size;
3949 }

Here is the call graph for this function:

Here is the caller graph for this function:

TableBuilder * rocksdb::GetTableBuilder ( const Options &  options,
WritableFile *  file,
CompressionType  compression_type 
)

Definition at line 29 of file builder.cc.

References rocksdb::Options::table_factory.

Referenced by BuildTable(), and rocksdb::DBImpl::OpenCompactionOutputFile().

30  {
31  return options.table_factory->GetTableBuilder(options, file,
32  compression_type);
33 }

Here is the caller graph for this function:

bool rocksdb::GetVarint32 ( Slice *  input,
uint32_t value 
)

Definition at line 150 of file coding.cc.

References rocksdb::Slice::data(), GetVarint32Ptr(), and rocksdb::Slice::size().

Referenced by rocksdb::VersionEdit::DecodeFrom(), GetLengthPrefixedSlice(), rocksdb::VersionEdit::GetLevel(), and TEST().

150  {
151  const char* p = input->data();
152  const char* limit = p + input->size();
153  const char* q = GetVarint32Ptr(p, limit, value);
154  if (q == nullptr) {
155  return false;
156  } else {
157  *input = Slice(q, limit - q);
158  return true;
159  }
160 }

Here is the call graph for this function:

Here is the caller graph for this function:

const char * rocksdb::GetVarint32Ptr ( const char *  p,
const char *  limit,
uint32_t v 
)
inline

Definition at line 98 of file coding.h.

References GetVarint32PtrFallback().

Referenced by DecodeEntry(), rocksdb::MemTable::Get(), GetLengthPrefixedSlice(), GetVarint32(), TEST(), and rocksdb::MemTable::Update().

100  {
101  if (p < limit) {
102  uint32_t result = *(reinterpret_cast<const unsigned char*>(p));
103  if ((result & 128) == 0) {
104  *value = result;
105  return p + 1;
106  }
107  }
108  return GetVarint32PtrFallback(p, limit, value);
109 }

Here is the call graph for this function:

Here is the caller graph for this function:

const char * rocksdb::GetVarint32PtrFallback ( const char *  p,
const char *  limit,
uint32_t value 
)

Definition at line 131 of file coding.cc.

Referenced by GetVarint32Ptr().

// Slow path for varint32 decoding: accumulate 7 bits per byte,
// little-endian group order, at most 5 bytes. Returns the position
// past the varint, or nullptr on truncated/overlong input.
const char* GetVarint32PtrFallback(const char* p, const char* limit,
                                   uint32_t* value) {
  uint32_t decoded = 0;
  uint32_t shift = 0;
  while (shift <= 28 && p < limit) {
    const uint32_t current = *(reinterpret_cast<const unsigned char*>(p));
    ++p;
    if ((current & 128) == 0) {
      // Final byte: continuation bit clear.
      *value = decoded | (current << shift);
      return reinterpret_cast<const char*>(p);
    }
    decoded |= (current & 127) << shift;
    shift += 7;
  }
  return nullptr;
}

Here is the caller graph for this function:

bool rocksdb::GetVarint64 ( Slice *  input,
uint64_t value 
)

Definition at line 179 of file coding.cc.

References rocksdb::Slice::data(), GetVarint64Ptr(), and rocksdb::Slice::size().

Referenced by rocksdb::BlockHandle::DecodeFrom(), rocksdb::VersionEdit::DecodeFrom(), GetDeletedKeys(), IsSingleVarint(), and rocksdb::BlockBasedTable::ReadProperties().

179  {
180  const char* p = input->data();
181  const char* limit = p + input->size();
182  const char* q = GetVarint64Ptr(p, limit, value);
183  if (q == nullptr) {
184  return false;
185  } else {
186  *input = Slice(q, limit - q);
187  return true;
188  }
189 }

Here is the call graph for this function:

Here is the caller graph for this function:

const char * rocksdb::GetVarint64Ptr ( const char *  p,
const char *  limit,
uint64_t value 
)

Definition at line 162 of file coding.cc.

Referenced by GetVarint64(), and TEST().

// Decode a varint64: 7 bits per byte, little-endian group order,
// at most 10 bytes. Returns the position past the varint, or nullptr
// on truncated/overlong input.
const char* GetVarint64Ptr(const char* p, const char* limit,
                           uint64_t* value) {
  uint64_t decoded = 0;
  uint32_t shift = 0;
  while (shift <= 63 && p < limit) {
    const uint64_t current = *(reinterpret_cast<const unsigned char*>(p));
    ++p;
    if ((current & 128) == 0) {
      // Final byte: continuation bit clear.
      *value = decoded | (current << shift);
      return reinterpret_cast<const char*>(p);
    }
    decoded |= (current & 127) << shift;
    shift += 7;
  }
  return nullptr;
}

Here is the caller graph for this function:

rocksdb::has_from_ ( false  )

Referenced by is_input_key_hex_().

Here is the caller graph for this function:

rocksdb::has_to_ ( false  )

Referenced by is_input_key_hex_().

Here is the caller graph for this function:

uint32_t rocksdb::Hash ( const char *  data,
size_t  n,
uint32_t  seed 
)

Definition at line 16 of file hash.cc.

References DecodeFixed32().

Referenced by rocksdb::TestHashFilter::CreateFilter(), rocksdb::ConcurrentTest::HashNumbers(), and rocksdb::TestHashFilter::KeyMayMatch().

// Murmur-style hash of 'data[0..n)' seeded with 'seed'. The output is
// persisted (bloom filters), so the byte-for-byte result must never change.
16  {
17  // Similar to murmur hash
18  const uint32_t m = 0xc6a4a793;
19  const uint32_t r = 24;
20  const char* limit = data + n;
21  uint32_t h = seed ^ (n * m);
22 
23  // Pick up four bytes at a time
24  while (data + 4 <= limit) {
25  uint32_t w = DecodeFixed32(data);
26  data += 4;
27  h += w;
28  h *= m;
29  h ^= (h >> 16);
30  }
31 
32  // Pick up remaining bytes
// NOTE(review): the tail bytes below are added as plain 'char', so on
// signed-char targets bytes >= 0x80 sign-extend before the add. That is
// implementation-defined, but "fixing" it with an unsigned cast would
// change hash values and invalidate data already on disk — do not
// change without a format-version bump. Confirm against upstream hash.cc.
33  switch (limit - data) {
34  case 3:
35  h += data[2] << 16;
36  // fall through
37  case 2:
38  h += data[1] << 8;
39  // fall through
40  case 1:
41  h += data[0];
42  h *= m;
43  h ^= (h >> r);
44  break;
45  }
46  return h;
47 }

Here is the call graph for this function:

Here is the caller graph for this function:

std::string rocksdb::IdentityFileName ( const std::string &  dbname)

Definition at line 130 of file filename.cc.

Referenced by rocksdb::DBImpl::Recover(), SetIdentityFile(), and TEST().

// Path of the IDENTITY file that stores the DB's unique id.
std::string IdentityFileName(const std::string& dbname) {
  std::string path = dbname;
  path += "/IDENTITY";
  return path;
}

Here is the caller graph for this function:

rocksdb::if ( params.size()!  = 1)

Definition at line 1359 of file ldb_cmd.cc.

References rocksdb::LDBCommand::exec_state_, and rocksdb::LDBCommandExecuteResult::FAILED().

Referenced by beast::asio::InputParser::Get< UInt32Str >::func(), ripple::Pathfinder::getPaths(), ripple::LedgerConsensusImp::getTransactionTree(), ripple::NetworkOPsImp::getTXMap(), ripple::RippleAddress::humanAccountID(), rocksdb::PosixLogger::Logv(), rocksdb::DBImpl::PurgeObsoleteWALFiles(), ripple::PeerImp::recvGetObjectByHash(), ripple::FeaturesImpl::setJson(), and beast::sysinfo().

1359  {
1360  exec_state_ = LDBCommandExecuteResult::FAILED(
1361  "<key> must be specified for the get command");
1362  } else {

Here is the call graph for this function:

Here is the caller graph for this function:

rocksdb::if ( is_key_hex_  )

Definition at line 1366 of file ldb_cmd.cc.

References rocksdb::LDBCommand::HexToString(), and rocksdb::GetCommand::key_.

// Fragment of GetCommand: when the --key_hex/--hex flag was given,
// decode the user-supplied key from its hex form before the lookup.
1366  {
1367  key_ = HexToString(key_);
1368  }

Here is the call graph for this function:

static std::string rocksdb::IKey ( const std::string &  user_key,
uint64_t  seq,
ValueType  vt 
)
static

Definition at line 16 of file dbformat_test.cc.

References AppendInternalKey().

Referenced by TEST(), and TestKey().

18  {
19  std::string encoded;
20  AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
21  return encoded;
22 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::IncBucketCounts ( vector< uint64_t > &  bucket_counts,
int  ttl_start,
int  time_range,
int  bucket_size,
int  timekv,
int  num_buckets 
)

Definition at line 564 of file ldb_cmd.cc.

Referenced by rocksdb::DBDumperCommand::DoCommand().

// Bump the histogram bucket containing 'timekv'. Buckets partition
// [ttl_start, ttl_start + time_range) into slots of 'bucket_size'
// seconds; out-of-range input is a caller bug (asserted).
void IncBucketCounts(std::vector<uint64_t>& bucket_counts, int ttl_start,
                     int time_range, int bucket_size, int timekv,
                     int num_buckets) {
  assert(time_range > 0 && timekv >= ttl_start && bucket_size > 0 &&
         timekv < (ttl_start + time_range) && num_buckets > 1);
  const int slot = (timekv - ttl_start) / bucket_size;
  ++bucket_counts[slot];
}

Here is the caller graph for this function:

static void rocksdb::Increment ( const Comparator *  cmp,
std::string *  key 
)
static

Definition at line 76 of file table_test.cc.

References BytewiseComparator(), and reverse_key_comparator.

Referenced by rocksdb::Harness::PickRandomKey().

76  {
77  if (cmp == BytewiseComparator()) {
78  key->push_back('\0');
79  } else {
80  assert(cmp == &reverse_key_comparator);
81  std::string rev = Reverse(*key);
82  rev.push_back('\0');
83  *key = Reverse(rev);
84  }
85 }

Here is the call graph for this function:

Here is the caller graph for this function:

std::string rocksdb::InfoLogFileName ( const std::string &  dbname,
const std::string &  db_path,
const std::string &  log_dir 
)

Definition at line 99 of file filename.cc.

References FlattenPath().

Referenced by rocksdb::AutoRollLogger::AutoRollLogger(), and CreateLoggerFromOptions().

100  {
101  if (log_dir.empty())
102  return dbname + "/LOG";
103 
104  char flatten_db_path[256];
105  FlattenPath(db_path, flatten_db_path, 256);
106  return log_dir + "/" + flatten_db_path + "_LOG";
107 }

Here is the call graph for this function:

Here is the caller graph for this function:

static void rocksdb::InitModule ( )
static

Definition at line 77 of file comparator.cc.

References bytewise.

Referenced by BytewiseComparator().

// One-shot initializer for the global 'bytewise' comparator singleton.
// The object is intentionally never deleted (process-lifetime).
// NOTE(review): presumably invoked exactly once (e.g. via a once-guard
// in BytewiseComparator()) — confirm at the call site; this function
// itself is not idempotent or thread-safe.
77  {
78  bytewise = new BytewiseComparatorImpl;
79  }

Here is the caller graph for this function:

static void rocksdb::InputSummary ( std::vector< FileMetaData * > &  files,
char *  output,
int  len 
)
static

Definition at line 3114 of file version_set.cc.

Referenced by rocksdb::Compaction::Summary().

3116  {
3117  int write = 0;
3118  for (unsigned int i = 0; i < files.size(); i++) {
3119  int sz = len - write;
3120  int ret = snprintf(output + write, sz, "%lu(%lu) ",
3121  (unsigned long)files.at(i)->number,
3122  (unsigned long)files.at(i)->file_size);
3123  if (ret < 0 || ret >= sz)
3124  break;
3125  write += ret;
3126  }
3127 }

Here is the caller graph for this function:

void rocksdb::InstallStackTraceHandler ( )

Definition at line 98 of file stack_trace.cc.

Referenced by main().

98 {}

Here is the caller graph for this function:

size_t rocksdb::InternalKeyEncodingLength ( const ParsedInternalKey &  key)
inline

Definition at line 59 of file dbformat.h.

References rocksdb::Slice::size(), and rocksdb::ParsedInternalKey::user_key.

59  {
60  return key.user_key.size() + 8;
61 }

Here is the call graph for this function:

rocksdb::is_input_key_hex_ ( false  )

Definition at line 607 of file ldb_cmd.cc.

References count_delim_(), count_only_(), delim_(), has_from_(), has_to_(), HexToString(), max_keys_(), and print_stats_().

// Constructor-body fragment (Doxygen-extracted) of an ldb scan/dump
// command: parses --from/--to bounds, --max_keys, the count delimiter,
// and the stats/count/input-key-hex flags from the parsed options map.
607  {
608 
609  has_from_ = ParseStringOption(options, ARG_FROM, &from_);
610  has_to_ = ParseStringOption(options, ARG_TO, &to_);
611 
612  ParseIntOption(options, ARG_MAX_KEYS, max_keys_, exec_state_);
613  map<string, string>::const_iterator itr = options.find(ARG_COUNT_DELIM);
614  if (itr != options.end()) {
// An explicit --count_delim=<str> both enables counting and sets the delimiter.
615  delim_ = itr->second;
616  count_delim_ = true;
617  // fprintf(stdout,"delim = %c\n",delim_[0]);
618  } else {
// Bare --count_delim flag: counting with the default "." delimiter.
619  count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
620  delim_=".";
621  }
622 
623  print_stats_ = IsFlagPresent(flags, ARG_STATS);
624  count_only_ = IsFlagPresent(flags, ARG_COUNT_ONLY);
625  is_input_key_hex_ = IsFlagPresent(flags, ARG_INPUT_KEY_HEX);
626 
// Bounds were typed in hex: decode them to raw bytes before use.
627  if (is_input_key_hex_) {
628  if (has_from_) {
629  from_ = HexToString(from_);
630  }
631  if (has_to_) {
632  to_ = HexToString(to_);
633  }
634  }
635 }

Here is the call graph for this function:

bool rocksdb::IsSingleVarint ( const std::string &  s)

Definition at line 182 of file env_test.cc.

References GetVarint64(), and rocksdb::Slice::size().

182  {
183  Slice slice(s);
184 
185  uint64_t v;
186  if (!GetVarint64(&slice, &v)) {
187  return false;
188  }
189 
190  return slice.size() == 0;
191 }

Here is the call graph for this function:

static Slice rocksdb::Key ( int  i,
char *  buffer 
)
static

Definition at line 20 of file bloom_test.cc.

// Use the raw in-memory bytes of 'i' as a fixed-width key; 'buffer'
// must outlive the returned Slice (the Slice does not own the bytes).
20  {
21  memcpy(buffer, &i, sizeof(i));
22  return Slice(buffer, sizeof(i));
23  }
static std::string rocksdb::Key ( long  val)
static

Definition at line 349 of file db_stress.cc.

References PutFixed64().

349  {
350  std::string little_endian_key;
351  std::string big_endian_key;
352  PutFixed64(&little_endian_key, val);
353  assert(little_endian_key.size() == sizeof(val));
354  big_endian_key.resize(sizeof(val));
355  for (int i=0; i<(int)sizeof(val); i++) {
356  big_endian_key[i] = little_endian_key[sizeof(val) - 1 - i];
357  }
358  return big_endian_key;
359 }

Here is the call graph for this function:

static std::string rocksdb::Key ( int  i)
static

Definition at line 678 of file db_test.cc.

678  {
679  char buf[100];
680  snprintf(buf, sizeof(buf), "key%06d", i);
681  return std::string(buf);
682 }
static std::string rocksdb::Key ( int  i)
static

Definition at line 743 of file simple_table_db_test.cc.

743  {
744  char buf[100];
745  snprintf(buf, sizeof(buf), "key_______%06d", i);
746  return std::string(buf);
747 }
std::vector<std::uint64_t> rocksdb::ListLogFiles ( Env *  env,
const std::string &  path 
)

Definition at line 3872 of file db_test.cc.

References rocksdb::Env::GetChildren(), kLogFile, and ParseFileName().

Referenced by TEST().

3872  {
3873  std::vector<std::string> files;
3874  std::vector<uint64_t> log_files;
3875  env->GetChildren(path, &files);
3876  uint64_t number;
3877  FileType type;
3878  for (size_t i = 0; i < files.size(); ++i) {
3879  if (ParseFileName(files[i], &number, &type)) {
3880  if (type == kLogFile) {
3881  log_files.push_back(number);
3882  }
3883  }
3884  }
3885  return std::move(log_files);
3886 }

Here is the call graph for this function:

Here is the caller graph for this function:

std::string rocksdb::LockFileName ( const std::string &  dbname)

Definition at line 90 of file filename.cc.

Referenced by DestroyDB(), rocksdb::DBImpl::Recover(), and TEST().

// Path of the LOCK file that guards exclusive access to the DB.
std::string LockFileName(const std::string& dbname) {
  std::string path = dbname;
  path.append("/LOCK");
  return path;
}

Here is the caller graph for this function:

void rocksdb::Log ( const shared_ptr< Logger > &  info_log,
const char *  format,
  ... 
)

Definition at line 54 of file env.cc.

Referenced by ripple::addTxnSeqField(), ripple::Ledger::assertSane(), rocksdb::DBImpl::BackgroundCallCompaction(), rocksdb::DBImpl::BackgroundCallFlush(), rocksdb::DBImpl::BackgroundCompaction(), rocksdb::DBImpl::BackgroundFlush(), ripple::Transaction::checkSign(), rocksdb::VersionSet::CompactRange(), rocksdb::LogReporter::Corruption(), rocksdb::DBImpl::DBImpl(), rocksdb::DBImplReadOnly::DBImplReadOnly(), rocksdb::DBImpl::DelayLoggingAndReset(), rocksdb::DBImpl::DeleteFile(), rocksdb::DBImpl::DisableFileDeletions(), rocksdb::DBImpl::DoCompactionWork(), ripple::AccountState::dump(), rocksdb::Options::Dump(), rocksdb::DBImpl::EnableFileDeletions(), rocksdb::DBImpl::findEarliestVisibleSnapshot(), rocksdb::BlockBasedTableBuilder::Finish(), rocksdb::DBImpl::FinishCompactionOutputFile(), rocksdb::DBImpl::FlushMemTableToOutputFile(), rocksdb::TtlMergeOperator::FullMerge(), rocksdb::DBImpl::GetLiveFiles(), ripple::Ledger::getSQL(), AutoSocket::handle_autodetect(), rocksdb::LogReporter::Info(), rocksdb::DBImpl::InstallCompactionResults(), rocksdb::MemTableList::InstallMemtableFlushResults(), ripple::SerializedValidation::isValid(), rocksdb::SimpleTableIterator::key(), rocksdb::VersionSet::LogAndApply(), LogMessage(), ripple::logTimedDestroy(), rocksdb::DBImpl::MakeRoomForWrite(), rocksdb::VersionSet::ManifestContains(), rocksdb::DBImpl::MaybeDumpStats(), rocksdb::DBImpl::MaybeIgnoreError(), rocksdb::BlockBasedTable::Open(), ripple::STObject::operator==(), rocksdb::TtlMergeOperator::PartialMerge(), rocksdb::VersionSet::PickCompactionUniversal(), rocksdb::VersionSet::PickCompactionUniversalReadAmp(), rocksdb::VersionSet::PickCompactionUniversalSizeAmp(), rocksdb::DBImpl::PrintStatistics(), rocksdb::DBImpl::PurgeObsoleteFiles(), rocksdb::DBImpl::PurgeObsoleteWALFiles(), rocksdb::DBImpl::ReadFirstLine(), rocksdb::BlockBasedTable::ReadMetaBlock(), rocksdb::BlockBasedTable::ReadProperties(), rocksdb::VersionSet::Recover(), rocksdb::DBImpl::RecoverLogFile(), 
rocksdb::DBImpl::ReFitLevel(), AutoSocket::rfc2818_verify(), SanitizeOptions(), ripple::schemaHas(), ripple::SerializedTransaction::SerializedTransaction(), ripple::SerializedValidation::SerializedValidation(), ripple::LogSink::setLogFile(), ripple::NetworkOPsImp::setMode(), rocksdb::VersionSet::SetupOtherInputs(), ripple::Transaction::sharedTransaction(), ripple::Transaction::sign(), rocksdb::DBImpl::TEST_CompactRange(), ripple::ApplicationImp::updateTables(), rocksdb::VersionSet::VerifyCompactionFileConsistency(), ripple::Ledger::walkLedger(), ripple::Ledger::writeBack(), rocksdb::DBImpl::WriteLevel0Table(), and rocksdb::DBImpl::WriteLevel0TableForRecovery().

54  {
55  if (info_log) {
56  va_list ap;
57  va_start(ap, format);
58  info_log->Logv(format, ap);
59  va_end(ap);
60  }
61 }
void rocksdb::Log ( Logger *  info_log,
const char *  format,
  ... 
)

Definition at line 39 of file env.cc.

References rocksdb::Logger::Logv().

39  {
40  if (info_log) {
41  va_list ap;
42  va_start(ap, format);
43  info_log->Logv(format, ap);
44  va_end(ap);
45  }
46 }

Here is the call graph for this function:

std::string rocksdb::LogFileName ( const std::string &  name,
uint64_t  number 
)
void rocksdb::LogFlush ( Logger *  info_log)

Definition at line 33 of file env.cc.

References rocksdb::Logger::Flush().

33  {
34  if (info_log) {
35  info_log->Flush();
36  }
37 }

Here is the call graph for this function:

void rocksdb::LogMessage ( Logger *  logger,
const char *  message 
)

Definition at line 52 of file auto_roll_logger_test.cc.

References Log().

Referenced by rocksdb::AutoRollLoggerTest::RollLogFileBySizeTest(), rocksdb::AutoRollLoggerTest::RollLogFileByTimeTest(), and TEST().

// Forward 'message' verbatim through the varargs Log() API; the "%s"
// format guards against '%' characters inside the message itself.
52  {
53  Log(logger, "%s", message);
54 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::MakeBuilder ( const Options &  options,
std::unique_ptr< FakeWritableFile > *  writable,
std::unique_ptr< TableBuilder > *  builder 
)

Definition at line 86 of file table_properties_collector_test.cc.

References rocksdb::Options::compression, and rocksdb::Options::table_factory.

Referenced by TEST().

89  {
90  writable->reset(new FakeWritableFile);
91  builder->reset(
92  options.table_factory->GetTableBuilder(options, writable->get(),
93  options.compression));
94 }

Here is the caller graph for this function:

static std::string rocksdb::MakeFileName ( const std::string &  name,
uint64_t  number,
const char *  suffix 
)
static

Definition at line 51 of file filename.cc.

Referenced by ArchivedLogFileName(), LogFileName(), TableFileName(), and TempFileName().

// Render "<name>/NNNNNN.<suffix>" with the file number zero-padded to
// at least six digits.
static std::string MakeFileName(const std::string& name, uint64_t number,
                                const char* suffix) {
  char formatted[100];
  snprintf(formatted, sizeof(formatted), "/%06llu.%s",
           static_cast<unsigned long long>(number), suffix);
  return name + formatted;
}

Here is the caller graph for this function:

static std::string rocksdb::MakeKey ( int  i,
int  j,
bool  through_db 
)
static

Definition at line 22 of file table_reader_bench.cc.

References rocksdb::InternalKey::Encode(), kTypeValue, and rocksdb::Slice::ToString().

22  {
23  char buf[100];
24  snprintf(buf, sizeof(buf), "%04d__key___%04d", i, j);
25  if (through_db) {
26  return std::string(buf);
27  }
28  // If we directly query table, which operates on internal keys
29  // instead of user keys, we need to add 8 bytes of internal
30  // information (row type etc) to user key to make an internal
31  // key.
32  InternalKey key(std::string(buf), 0, ValueType::kTypeValue);
33  return key.Encode().ToString();
34 }

Here is the call graph for this function:

std::string rocksdb::MakeKey ( unsigned int  num)

Definition at line 4857 of file db_test.cc.

Referenced by BM_LogAndApply(), and TableReaderBenchmark().

4857  {
4858  char buf[30];
4859  snprintf(buf, sizeof(buf), "%016u", num);
4860  return std::string(buf);
4861 }

Here is the caller graph for this function:

void rocksdb::MakeUpper ( std::string *const  s)

The manual REDIS test begins here. It runs only if you invoke: ./redis_test -m

Definition at line 741 of file redis_lists_test.cc.

Referenced by manual_redis_test().

741  {
742  int len = s->length();
743  for(int i=0; i<len; ++i) {
744  (*s)[i] = toupper((*s)[i]); // C-version defined in <ctype.h>
745  }
746 }

Here is the caller graph for this function:

int rocksdb::manual_redis_test ( bool  destructive)

Allows the user to enter REDIS commands on the command line. This is useful for manual / interactive testing / debugging. Use destructive=true to clean the database before use. Use destructive=false to keep the previous state (i.e.: persistent). Should be called from the main function.

Definition at line 753 of file redis_lists_test.cc.

References rocksdb::RedisLists::Index(), rocksdb::RedisLists::InsertAfter(), rocksdb::RedisLists::InsertBefore(), rocksdb::RedisListsTest::kDefaultDbName, rocksdb::RedisLists::Length(), MakeUpper(), rocksdb::RedisListsTest::options, rocksdb::RedisLists::PopLeft(), rocksdb::RedisLists::PopRight(), rocksdb::RedisLists::Print(), rocksdb::RedisLists::PushLeft(), rocksdb::RedisLists::PushRight(), rocksdb::RedisLists::Range(), rocksdb::RedisLists::Remove(), rocksdb::RedisLists::Set(), and rocksdb::RedisLists::Trim().

Referenced by main().

// Interactive REPL over RedisLists: reads whitespace-separated tokens
// from stdin, dispatches on the upper-cased command name, and loops
// until "QUIT". Unknown commands are reported and ignored.
753  {
754  RedisLists redis(RedisListsTest::kDefaultDbName,
755  RedisListsTest::options,
756  destructive);
757 
758  // TODO: Right now, please use spaces to separate each word.
759  // In actual redis, you can use quotes to specify compound values
760  // Example: RPUSH mylist "this is a compound value"
761 
762  std::string command;
763  while(true) {
764  cin >> command;
765  MakeUpper(&command);
766 
767  if (command == "LINSERT") {
// LINSERT <key> BEFORE|AFTER <pivot> <value>
768  std::string k, t, p, v;
769  cin >> k >> t >> p >> v;
770  MakeUpper(&t);
771  if (t=="BEFORE") {
772  std::cout << redis.InsertBefore(k, p, v) << std::endl;
773  } else if (t=="AFTER") {
774  std::cout << redis.InsertAfter(k, p, v) << std::endl;
775  }
776  } else if (command == "LPUSH") {
777  std::string k, v;
778  std::cin >> k >> v;
779  redis.PushLeft(k, v);
780  } else if (command == "RPUSH") {
781  std::string k, v;
782  std::cin >> k >> v;
783  redis.PushRight(k, v);
784  } else if (command == "LPOP") {
785  std::string k;
786  std::cin >> k;
787  string res;
788  redis.PopLeft(k, &res);
789  std::cout << res << std::endl;
790  } else if (command == "RPOP") {
791  std::string k;
792  std::cin >> k;
793  string res;
794  redis.PopRight(k, &res);
795  std::cout << res << std::endl;
796  } else if (command == "LREM") {
// LREM <key> <count> <value>: remove up to |count| occurrences.
797  std::string k;
798  int amt;
799  std::string v;
800 
801  std::cin >> k >> amt >> v;
802  std::cout << redis.Remove(k, amt, v) << std::endl;
803  } else if (command == "LLEN") {
804  std::string k;
805  std::cin >> k;
806  std::cout << redis.Length(k) << std::endl;
807  } else if (command == "LRANGE") {
808  std::string k;
809  int i, j;
810  std::cin >> k >> i >> j;
811  std::vector<std::string> res = redis.Range(k, i, j);
812  for (auto it = res.begin(); it != res.end(); ++it) {
813  std::cout << " " << (*it);
814  }
815  std::cout << std::endl;
816  } else if (command == "LTRIM") {
817  std::string k;
818  int i, j;
819  std::cin >> k >> i >> j;
820  redis.Trim(k, i, j);
821  } else if (command == "LSET") {
822  std::string k;
823  int idx;
824  std::string v;
825  cin >> k >> idx >> v;
826  redis.Set(k, idx, v);
827  } else if (command == "LINDEX") {
828  std::string k;
829  int idx;
830  std::cin >> k >> idx;
831  string res;
832  redis.Index(k, idx, &res);
833  std::cout << res << std::endl;
834  } else if (command == "PRINT") { // Added by Deon
835  std::string k;
836  cin >> k;
837  redis.Print(k);
838  } else if (command == "QUIT") {
// Only exit path: returns success to the caller (main).
839  return 0;
840  } else {
841  std::cout << "unknown command: " << command << std::endl;
842  }
843  }
844 }

Here is the call graph for this function:

Here is the caller graph for this function:

static void rocksdb::MarkKeyMayExist ( void *  arg)
static

Definition at line 302 of file version_set.cc.

References arg.

Referenced by rocksdb::Version::Get().

302  {
303  Saver* s = reinterpret_cast<Saver*>(arg);
304  s->state = kFound;
305  if (s->value_found != nullptr) {
306  *(s->value_found) = false;
307  }
308 }

Here is the caller graph for this function:

rocksdb::max_keys_ ( 1)

Referenced by is_input_key_hex_(), and print_stats_().

Here is the caller graph for this function:

rocksdb::max_keys_scanned_ ( 1)

Definition at line 1503 of file ldb_cmd.cc.

References end_key_specified_(), rocksdb::LDBCommandExecuteResult::FAILED(), HexToString(), and start_key_specified_().

// Constructor-body fragment (Doxygen-extracted) of an ldb scan command:
// parses the optional --from/--to key bounds (hex-decoding them when
// --key_hex is set) and the --max_keys scan limit.
1503  {
1504 
1505  map<string, string>::const_iterator itr = options.find(ARG_FROM);
1506  if (itr != options.end()) {
1507  start_key_ = itr->second;
1508  if (is_key_hex_) {
1509  start_key_ = HexToString(start_key_);
1510  }
1511  start_key_specified_ = true;
1512  }
1513  itr = options.find(ARG_TO);
1514  if (itr != options.end()) {
1515  end_key_ = itr->second;
1516  if (is_key_hex_) {
1517  end_key_ = HexToString(end_key_);
1518  }
1519  end_key_specified_ = true;
1520  }
1521 
1522  itr = options.find(ARG_MAX_KEYS);
1523  if (itr != options.end()) {
// stoi throws on non-numeric or out-of-range input; both cases are
// converted into a FAILED execution state rather than aborting.
1524  try {
1525  max_keys_scanned_ = stoi(itr->second);
1526  } catch(const invalid_argument&) {
1527  exec_state_ = LDBCommandExecuteResult::FAILED(ARG_MAX_KEYS +
1528  " has an invalid value");
1529  } catch(const out_of_range&) {
1530  exec_state_ = LDBCommandExecuteResult::FAILED(ARG_MAX_KEYS +
1531  " has a value out-of-range");
1532  }
1533  }
1534 }

Here is the call graph for this function:

rocksdb::maxBucketValue_ ( bucketValues_.  back())
std::string rocksdb::MetaDatabaseName ( const std::string &  dbname,
uint64_t  number 
)

Definition at line 123 of file filename.cc.

Referenced by TEST().

123  {
124  char buf[100];
125  snprintf(buf, sizeof(buf), "/METADB-%llu",
126  static_cast<unsigned long long>(number));
127  return dbname + buf;
128 }

Here is the caller graph for this function:

rocksdb::minBucketValue_ ( bucketValues_.  front())

Definition at line 39 of file histogram.cc.

// Constructor-body fragment (Doxygen-extracted): builds the reverse
// lookup from bucket value to its index for O(1) bucket resolution.
39  {
40  for (size_t i =0; i < bucketValues_.size(); ++i) {
41  valueIndexMap_[bucketValues_[i]] = i;
42  }
43 }
void rocksdb::MinLevelHelper ( DBTest *  self,
Options &  options 
)

Definition at line 2342 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Options::level0_file_num_compaction_trigger, and RandomString().

Referenced by TEST().

2342  {
2343  Random rnd(301);
2344 
2345  for (int num = 0;
2346  num < options.level0_file_num_compaction_trigger - 1;
2347  num++)
2348  {
2349  std::vector<std::string> values;
2350  // Write 120KB (12 values, each 10K)
2351  for (int i = 0; i < 12; i++) {
2352  values.push_back(RandomString(&rnd, 10000));
2353  ASSERT_OK(self->Put(Key(i), values[i]));
2354  }
2355  self->dbfull()->TEST_WaitForFlushMemTable();
2356  ASSERT_EQ(self->NumTableFilesAtLevel(0), num + 1);
2357  }
2358 
2359  //generate one more file in level-0, and should trigger level-0 compaction
2360  std::vector<std::string> values;
2361  for (int i = 0; i < 12; i++) {
2362  values.push_back(RandomString(&rnd, 10000));
2363  ASSERT_OK(self->Put(Key(i), values[i]));
2364  }
2365  self->dbfull()->TEST_WaitForCompact();
2366 
2367  ASSERT_EQ(self->NumTableFilesAtLevel(0), 0);
2368  ASSERT_EQ(self->NumTableFilesAtLevel(1), 1);
2369 }

Here is the call graph for this function:

Here is the caller graph for this function:

bool rocksdb::MinLevelToCompress ( CompressionType &  type,
Options &  options,
int  wbits,
int  lev,
int  strategy 
)

Definition at line 2372 of file db_test.cc.

References BZip2CompressionSupported(), rocksdb::Options::compression_per_level, rocksdb::Options::create_if_missing, kBZip2Compression, kNoCompression, kSnappyCompression, kZlibCompression, rocksdb::Options::level0_file_num_compaction_trigger, rocksdb::Options::max_mem_compaction_level, rocksdb::Options::num_levels, SnappyCompressionSupported(), rocksdb::Options::write_buffer_size, and ZlibCompressionSupported().

Referenced by TEST().

2373  {
2374  fprintf(stderr, "Test with compression options : window_bits = %d, level = %d, strategy = %d}\n", wbits, lev, strategy);
2375  options.write_buffer_size = 100<<10; //100KB
2376  options.num_levels = 3;
2377  options.max_mem_compaction_level = 0;
2378  options.level0_file_num_compaction_trigger = 3;
2379  options.create_if_missing = true;
2380 
2381  if (SnappyCompressionSupported(CompressionOptions(wbits, lev, strategy))) {
2382  type = kSnappyCompression;
2383  fprintf(stderr, "using snappy\n");
2384  } else if (ZlibCompressionSupported(
2385  CompressionOptions(wbits, lev, strategy))) {
2386  type = kZlibCompression;
2387  fprintf(stderr, "using zlib\n");
2388  } else if (BZip2CompressionSupported(
2389  CompressionOptions(wbits, lev, strategy))) {
2390  type = kBZip2Compression;
2391  fprintf(stderr, "using bzip2\n");
2392  } else {
2393  fprintf(stderr, "skipping test, compression disabled\n");
2394  return false;
2395  }
2396  options.compression_per_level.resize(options.num_levels);
2397 
2398  // do not compress L0
2399  for (int i = 0; i < 1; i++) {
2400  options.compression_per_level[i] = kNoCompression;
2401  }
2402  for (int i = 1; i < options.num_levels; i++) {
2403  options.compression_per_level[i] = type;
2404  }
2405  return true;
2406 }

Here is the call graph for this function:

Here is the caller graph for this function:

rocksdb::new_compaction_style_ ( 1)

Definition at line 1107 of file ldb_cmd.cc.

References rocksdb::LDBCommandExecuteResult::FAILED(), kCompactionStyleLevel, kCompactionStyleUniversal, and old_compaction_style_().

1107  {
1108 
1109  ParseIntOption(option_map_, ARG_OLD_COMPACTION_STYLE, old_compaction_style_,
1110  exec_state_);
1113  exec_state_ = LDBCommandExecuteResult::FAILED(
1114  "Use --" + ARG_OLD_COMPACTION_STYLE + " to specify old compaction " +
1115  "style. Check ldb help for proper compaction style value.\n");
1116  return;
1117  }
1118 
1119  ParseIntOption(option_map_, ARG_NEW_COMPACTION_STYLE, new_compaction_style_,
1120  exec_state_);
1123  exec_state_ = LDBCommandExecuteResult::FAILED(
1124  "Use --" + ARG_NEW_COMPACTION_STYLE + " to specify new compaction " +
1125  "style. Check ldb help for proper compaction style value.\n");
1126  return;
1127  }
1128 
1130  exec_state_ = LDBCommandExecuteResult::FAILED(
1131  "Old compaction style is the same as new compaction style. "
1132  "Nothing to do.\n");
1133  return;
1134  }
1135 
1138  exec_state_ = LDBCommandExecuteResult::FAILED(
1139  "Convert from universal compaction to level compaction. "
1140  "Nothing to do.\n");
1141  return;
1142  }
1143 }

Here is the call graph for this function:

rocksdb::new_levels_ ( 1)

Referenced by print_old_levels_().

Here is the caller graph for this function:

const FilterPolicy * rocksdb::NewBloomFilterPolicy ( int  bits_per_key)

Definition at line 107 of file bloom.cc.

Referenced by rocksdb::DBTest::DBTest(), leveldb_filterpolicy_create_bloom(), ripple::NodeStore::LevelDBBackend::LevelDBBackend(), rocksdb::LDBCommand::PrepareOptionsForOpenDB(), and TEST().

107  {
108  return new BloomFilterPolicy(bits_per_key);
109 }

Here is the caller graph for this function:

Iterator * rocksdb::NewDBIterator ( const std::string *  dbname,
Env *  env,
const Options &  options,
const Comparator *  user_key_comparator,
Iterator *  internal_iter,
const SequenceNumber &  sequence 
)

Definition at line 470 of file db_iter.cc.

Referenced by rocksdb::DBImplReadOnly::NewIterator(), and rocksdb::DBImpl::NewIterator().

476  {
477  return new DBIter(dbname, env, options, user_key_comparator,
478  internal_iter, sequence);
479 }

Here is the caller graph for this function:

Iterator * rocksdb::NewEmptyIterator ( )

Definition at line 64 of file iterator.cc.

References rocksdb::Status::OK().

Referenced by rocksdb::Version::NewConcatenatingIterator(), rocksdb::Block::NewIterator(), rocksdb::BlockBasedTable::NewIterator(), and NewMergingIterator().

64  {
65  return new EmptyIterator(Status::OK());
66 }

Here is the call graph for this function:

Here is the caller graph for this function:

Iterator * rocksdb::NewErrorIterator ( const Status &  status)

Definition at line 68 of file iterator.cc.

Referenced by rocksdb::BlockBasedTable::BlockReader(), GetFileIterator(), rocksdb::BlockBasedTable::IndexBlockReader(), rocksdb::Block::NewIterator(), and rocksdb::TableCache::NewIterator().

68  {
69  return new EmptyIterator(status);
70 }

Here is the caller graph for this function:

static bool rocksdb::NewestFirst ( FileMetaData *  a,
FileMetaData *  b 
)
static

Definition at line 391 of file version_set.cc.

References rocksdb::FileMetaData::number.

Referenced by rocksdb::Version::Get().

391  {
392  return a->number > b->number;
393 }

Here is the caller graph for this function:

static bool rocksdb::NewestFirstBySeqNo ( FileMetaData *  a,
FileMetaData *  b 
)
static

Definition at line 394 of file version_set.cc.

References rocksdb::FileMetaData::largest_seqno, and rocksdb::FileMetaData::smallest_seqno.

Referenced by rocksdb::Version::Get().

394  {
395  if (a->smallest_seqno > b->smallest_seqno) {
396  assert(a->largest_seqno > b->largest_seqno);
397  return true;
398  }
399  assert(a->largest_seqno <= b->largest_seqno);
400  return false;
401 }

Here is the caller graph for this function:

const SliceTransform * rocksdb::NewFixedPrefixTransform ( size_t  prefix_len)

Definition at line 65 of file slice.cc.

Referenced by rocksdb::DBTest::CurrentOptions(), main(), rocksdb::Benchmark::Open(), rocksdb::StressTest::Open(), OpenDb(), rocksdb::PrefixTest::OpenDb(), and TEST().

65  {
66  return new FixedPrefixTransform(prefix_len);
67 }

Here is the caller graph for this function:

MemTableRepFactory * rocksdb::NewHashSkipListRepFactory ( const SliceTransform *  transform,
size_t  bucket_count = 1000000 
)

Definition at line 325 of file hash_skiplist_rep.cc.

Referenced by rocksdb::PrefixTest::OpenDb(), and TEST().

326  {
327  return new HashSkipListRepFactory(transform, bucket_count);
328 }

Here is the caller graph for this function:

shared_ptr< Cache > rocksdb::NewLRUCache ( size_t  capacity)
shared_ptr< Cache > rocksdb::NewLRUCache ( size_t  capacity,
int  numShardBits 
)

Definition at line 418 of file cache.cc.

References NewLRUCache().

418  {
419  return NewLRUCache(capacity, numShardBits, kRemoveScanCountLimit);
420 }

Here is the call graph for this function:

shared_ptr< Cache > rocksdb::NewLRUCache ( size_t  capacity,
int  numShardBits,
int  removeScanCountLimit 
)

Definition at line 422 of file cache.cc.

References numShardBits.

423  {
424  if (numShardBits >= 20) {
425  return nullptr; // the cache cannot be sharded into too many fine pieces
426  }
427  return std::make_shared<ShardedLRUCache>(capacity,
428  numShardBits,
429  removeScanCountLimit);
430 }
MaxIterHeap rocksdb::NewMaxIterHeap ( const Comparator *  comparator)

Definition at line 55 of file iter_heap.h.

55  {
56  return MaxIterHeap(MaxIteratorComparator(comparator));
57 }
Env * rocksdb::NewMemEnv ( Env *  base_env)

Definition at line 382 of file memenv.cc.

382  {
383  return new InMemoryEnv(base_env);
384 }
Iterator * rocksdb::NewMergingIterator ( const Comparator *  cmp,
Iterator **  list,
int  n 
)

Definition at line 217 of file merger.cc.

References NewEmptyIterator().

Referenced by rocksdb::VersionSet::MakeInputIterator(), rocksdb::DBImpl::NewInternalIterator(), and rocksdb::DBImpl::WriteLevel0Table().

217  {
218  assert(n >= 0);
219  if (n == 0) {
220  return NewEmptyIterator();
221  } else if (n == 1) {
222  return list[0];
223  } else {
224  return new MergingIterator(cmp, list, n);
225  }
226 }

Here is the call graph for this function:

Here is the caller graph for this function:

MinIterHeap rocksdb::NewMinIterHeap ( const Comparator *  comparator)

Definition at line 60 of file iter_heap.h.

60  {
61  return MinIterHeap(MinIteratorComparator(comparator));
62 }
const SliceTransform * rocksdb::NewNoopTransform ( )

Definition at line 69 of file slice.cc.

69  {
70  return new NoopTransform;
71 }
Iterator* rocksdb::NewTwoLevelIterator ( Iterator *  index_iter,
Iterator *(*)(void *arg, const ReadOptions &options, const EnvOptions &soptions, const Slice &index_value, bool for_compaction)  block_function,
void *  arg,
const ReadOptions &  options,
const EnvOptions &  soptions,
bool  for_compaction = false 
)
Iterator* rocksdb::NewTwoLevelIterator ( Iterator *  index_iter,
BlockFunction  block_function,
void *  arg,
const ReadOptions &  options,
const EnvOptions &  soptions,
bool  for_compaction 
)

Definition at line 194 of file two_level_iterator.cc.

Referenced by rocksdb::VersionSet::MakeInputIterator(), rocksdb::Version::NewConcatenatingIterator(), and rocksdb::BlockBasedTable::NewIterator().

200  {
201  return new TwoLevelIterator(index_iter, block_function, arg,
202  options, soptions, for_compaction);
203 }

Here is the caller graph for this function:

static int rocksdb::NextLength ( int  length)
static

Definition at line 106 of file bloom_test.cc.

Referenced by TEST().

106  {
107  if (length < 10) {
108  length += 1;
109  } else if (length < 100) {
110  length += 10;
111  } else if (length < 1000) {
112  length += 100;
113  } else {
114  length += 1000;
115  }
116  return length;
117 }

Here is the caller graph for this function:

rocksdb::null_from_ ( true  )

Referenced by null_to_(), and print_stats_().

Here is the caller graph for this function:

rocksdb::null_to_ ( true  )

Definition at line 338 of file ldb_cmd.cc.

References HexToString(), and null_from_().

Referenced by print_stats_().

338  {
339 
340  map<string, string>::const_iterator itr = options.find(ARG_FROM);
341  if (itr != options.end()) {
342  null_from_ = false;
343  from_ = itr->second;
344  }
345 
346  itr = options.find(ARG_TO);
347  if (itr != options.end()) {
348  null_to_ = false;
349  to_ = itr->second;
350  }
351 
352  if (is_key_hex_) {
353  if (!null_from_) {
354  from_ = HexToString(from_);
355  }
356  if (!null_to_) {
357  to_ = HexToString(to_);
358  }
359  }
360 }

Here is the call graph for this function:

Here is the caller graph for this function:

std::string rocksdb::NumberToString ( uint64_t  num)

Definition at line 41 of file logging.cc.

References AppendNumberTo().

Referenced by rocksdb::ChangeCompactionStyleCommand::DoCommand(), rocksdb::ReduceLevelTest::FilesOnLevel(), rocksdb::DBTest::NumTableFilesAtLevel(), rocksdb::SimpleTableDBTest::NumTableFilesAtLevel(), PrintContents(), rocksdb::StressTest::PrintEnv(), and TEST().

41  {
42  std::string r;
43  AppendNumberTo(&r, num);
44  return r;
45 }

Here is the call graph for this function:

Here is the caller graph for this function:

rocksdb::old_compaction_style_ ( 1)

Referenced by new_compaction_style_().

Here is the caller graph for this function:

rocksdb::old_levels_ ( 1<<  16)
std::string rocksdb::OldInfoLogFileName ( const std::string &  dbname,
uint64_t  ts,
const std::string &  db_path,
const std::string &  log_dir 
)

Definition at line 110 of file filename.cc.

References FlattenPath().

Referenced by CreateLoggerFromOptions(), and rocksdb::AutoRollLogger::RollLogFile().

111  {
112  char buf[50];
113  snprintf(buf, sizeof(buf), "%llu", static_cast<unsigned long long>(ts));
114 
115  if (log_dir.empty())
116  return dbname + "/LOG.old." + buf;
117 
118  char flatten_db_path[256];
119  FlattenPath(db_path, flatten_db_path, 256);
120  return log_dir + "/" + flatten_db_path + "_LOG.old." + buf;
121 }

Here is the call graph for this function:

Here is the caller graph for this function:

int rocksdb::OldLogFileCount ( const string &  dir)

Definition at line 238 of file auto_roll_logger_test.cc.

References kInfoLogFile, and ParseFileName().

238  {
239  std::vector<std::string> files;
240  Env::Default()->GetChildren(dir, &files);
241  int log_file_count = 0;
242 
243  for (std::vector<std::string>::iterator it = files.begin();
244  it != files.end(); ++it) {
245  uint64_t create_time;
246  FileType type;
247  if (!ParseFileName(*it, &create_time, &type)) {
248  continue;
249  }
250  if (type == kInfoLogFile && create_time > 0) {
251  ++log_file_count;
252  }
253  }
254 
255  return log_file_count;
256 }

Here is the call graph for this function:

std::shared_ptr<DB> rocksdb::OpenDb ( )

Definition at line 30 of file perf_context_test.cc.

References ASSERT_OK, rocksdb::Options::create_if_missing, db, FLAGS_max_write_buffer_number, FLAGS_min_write_buffer_number_to_merge, FLAGS_use_set_based_memetable, FLAGS_write_buffer_size, kDbName, rocksdb::Options::max_write_buffer_number, rocksdb::Options::memtable_factory, rocksdb::Options::min_write_buffer_number_to_merge, NewFixedPrefixTransform(), rocksdb::DB::Open(), and rocksdb::Options::write_buffer_size.

Referenced by ProfileKeyComparison(), rocksdb::StringAppendOperatorTest::SetOpenDbFunction(), and TEST().

30  {
31  DB* db;
32  Options options;
33  options.create_if_missing = true;
34  options.write_buffer_size = FLAGS_write_buffer_size;
35  options.max_write_buffer_number = FLAGS_max_write_buffer_number;
36  options.min_write_buffer_number_to_merge =
38 
40  auto prefix_extractor = rocksdb::NewFixedPrefixTransform(0);
41  options.memtable_factory =
42  std::make_shared<rocksdb::PrefixHashRepFactory>(prefix_extractor);
43  }
44 
45  Status s = DB::Open(options, kDbName, &db);
46  ASSERT_OK(s);
47  return std::shared_ptr<DB>(db);
48 }

Here is the call graph for this function:

Here is the caller graph for this function:

std::shared_ptr<DB> rocksdb::OpenNormalDb ( char  delim_char)

Definition at line 29 of file stringappend_test.cc.

References ASSERT_OK, rocksdb::Options::create_if_missing, db, kDbName, rocksdb::Options::merge_operator, and rocksdb::DB::Open().

Referenced by main().

29  {
30  DB* db;
31  Options options;
32  options.create_if_missing = true;
33  options.merge_operator.reset(new StringAppendOperator(delim_char));
34  ASSERT_OK(DB::Open(options, kDbName, &db));
35  return std::shared_ptr<DB>(db);
36 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::OpenTable ( const Options &  options,
const std::string &  contents,
std::unique_ptr< TableReader > *  table_reader 
)

Definition at line 96 of file table_properties_collector_test.cc.

References ASSERT_OK, and rocksdb::Options::table_factory.

Referenced by TEST().

99  {
100 
101  std::unique_ptr<RandomAccessFile> file(new FakeRandomeAccessFile(contents));
102  auto s = options.table_factory->GetTableReader(
103  options,
104  EnvOptions(),
105  std::move(file),
106  contents.size(),
107  table_reader
108  );
109  ASSERT_OK(s);
110 }

Here is the caller graph for this function:

std::shared_ptr<DB> rocksdb::OpenTtlDb ( char  delim_char)

Definition at line 39 of file stringappend_test.cc.

References ASSERT_OK, rocksdb::Options::create_if_missing, db, kDbName, and rocksdb::Options::merge_operator.

Referenced by main().

39  {
40  StackableDB* db;
41  Options options;
42  options.create_if_missing = true;
43  options.merge_operator.reset(new StringAppendTESTOperator(delim_char));
44  Status s;
45  db = new DBWithTTL(123456, options, kDbName, s, false);
46  ASSERT_OK(s);
47  return std::shared_ptr<DB>(db);
48 }

Here is the caller graph for this function:

bool rocksdb::operator!= ( const Slice &  x,
const Slice &  y 
)
inline

Definition at line 120 of file slice.h.

120  {
121  return !(x == y);
122 }
bool rocksdb::operator== ( const Slice &  x,
const Slice &  y 
)
inline

Definition at line 115 of file slice.h.

References rocksdb::Slice::data(), and rocksdb::Slice::size().

115  {
116  return ((x.size() == y.size()) &&
117  (memcmp(x.data(), y.data(), x.size()) == 0));
118 }

Here is the call graph for this function:

static uint64_t rocksdb::PackSequenceAndType ( uint64_t  seq,
ValueType  t 
)
static

Definition at line 18 of file dbformat.cc.

References kMaxSequenceNumber, and kValueTypeForSeek.

Referenced by AppendInternalKey(), rocksdb::InternalKeyComparator::FindShortestSeparator(), rocksdb::InternalKeyComparator::FindShortSuccessor(), and rocksdb::LookupKey::LookupKey().

18  {
19  assert(seq <= kMaxSequenceNumber);
20  assert(t <= kValueTypeForSeek);
21  return (seq << 8) | t;
22 }

Here is the caller graph for this function:

bool rocksdb::ParseFileName ( const std::string &  fname,
uint64_t number,
FileType *  type,
WalFileType *  log_type 
)

Definition at line 144 of file filename.cc.

References ARCHIVAL_DIR, ConsumeDecimalNumber(), rocksdb::Slice::empty(), kAliveLogFile, kArchivedLogFile, kCurrentFile, kDBLockFile, kDescriptorFile, kIdentityFile, kInfoLogFile, kLogFile, kMetaDatabase, kTableFile, kTempFile, rocksdb::Slice::remove_prefix(), rocksdb::Slice::size(), and rocksdb::Slice::starts_with().

Referenced by rocksdb::DBImpl::AppendSortedWalsOfType(), rocksdb::DeleteFileTest::CheckFileTypeCounts(), rocksdb::CorruptionTest::Corrupt(), rocksdb::DBImpl::DeleteFile(), DestroyDB(), GetLogDirSize(), ListLogFiles(), OldLogFileCount(), rocksdb::DBImpl::PurgeObsoleteFiles(), rocksdb::DBImpl::PurgeObsoleteWALFiles(), rocksdb::DBImpl::Recover(), and TEST().

147  {
148  Slice rest(fname);
149  if (fname.length() > 1 && fname[0] == '/') {
150  rest.remove_prefix(1);
151  }
152  if (rest == "IDENTITY") {
153  *number = 0;
154  *type = kIdentityFile;
155  } else if (rest == "CURRENT") {
156  *number = 0;
157  *type = kCurrentFile;
158  } else if (rest == "LOCK") {
159  *number = 0;
160  *type = kDBLockFile;
161  } else if (rest == "LOG" || rest == "LOG.old") {
162  *number = 0;
163  *type = kInfoLogFile;
164  } else if (rest.starts_with("LOG.old.")) {
165  uint64_t ts_suffix;
166  // sizeof also counts the trailing '\0'.
167  rest.remove_prefix(sizeof("LOG.old.") - 1);
168  if (!ConsumeDecimalNumber(&rest, &ts_suffix)) {
169  return false;
170  }
171  *number = ts_suffix;
172  *type = kInfoLogFile;
173  } else if (rest.starts_with("MANIFEST-")) {
174  rest.remove_prefix(strlen("MANIFEST-"));
175  uint64_t num;
176  if (!ConsumeDecimalNumber(&rest, &num)) {
177  return false;
178  }
179  if (!rest.empty()) {
180  return false;
181  }
182  *type = kDescriptorFile;
183  *number = num;
184  } else if (rest.starts_with("METADB-")) {
185  rest.remove_prefix(strlen("METADB-"));
186  uint64_t num;
187  if (!ConsumeDecimalNumber(&rest, &num)) {
188  return false;
189  }
190  if (!rest.empty()) {
191  return false;
192  }
193  *type = kMetaDatabase;
194  *number = num;
195  } else {
196  // Avoid strtoull() to keep filename format independent of the
197  // current locale
198  bool archive_dir_found = false;
199  if (rest.starts_with(ARCHIVAL_DIR)) {
200  if (rest.size() <= ARCHIVAL_DIR.size()) {
201  return false;
202  }
203  rest.remove_prefix(ARCHIVAL_DIR.size() + 1); // Add 1 to remove / also
204  if (log_type) {
205  *log_type = kArchivedLogFile;
206  }
207  archive_dir_found = true;
208  }
209  uint64_t num;
210  if (!ConsumeDecimalNumber(&rest, &num)) {
211  return false;
212  }
213  Slice suffix = rest;
214  if (suffix == Slice(".log")) {
215  *type = kLogFile;
216  if (log_type && !archive_dir_found) {
217  *log_type = kAliveLogFile;
218  }
219  } else if (archive_dir_found) {
220  return false; // Archive dir can contain only log files
221  } else if (suffix == Slice(".sst")) {
222  *type = kTableFile;
223  } else if (suffix == Slice(".dbtmp")) {
224  *type = kTempFile;
225  } else {
226  return false;
227  }
228  *number = num;
229  }
230  return true;
231 }

Here is the call graph for this function:

Here is the caller graph for this function:

bool rocksdb::ParseInternalKey ( const Slice &  internal_key,
ParsedInternalKey *  result 
)
inline

Definition at line 158 of file dbformat.h.

References rocksdb::Slice::data(), DecodeFixed64(), kValueTypeForSeek, rocksdb::ParsedInternalKey::sequence, rocksdb::Slice::size(), rocksdb::ParsedInternalKey::type, and rocksdb::ParsedInternalKey::user_key.

Referenced by rocksdb::InternalKeyPropertiesCollector::Add(), rocksdb::UserKeyTablePropertiesCollector::Add(), rocksdb::DBTest::AllEntriesFor(), BuildTable(), rocksdb::InternalKey::DebugString(), rocksdb::InternalDumpCommand::DoCommand(), rocksdb::DBImpl::DoCompactionWork(), rocksdb::KeyConvertingIterator::key(), rocksdb::MergeHelper::MergeUntil(), PrintContents(), rocksdb::SstFileReader::ReadSequential(), SaveValue(), TEST(), and TestKey().

159  {
160  const size_t n = internal_key.size();
161  if (n < 8) return false;
162  uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
163  unsigned char c = num & 0xff;
164  result->sequence = num >> 8;
165  result->type = static_cast<ValueType>(c);
166  result->user_key = Slice(internal_key.data(), n - 8);
167  return (c <= static_cast<unsigned char>(kValueTypeForSeek));
168 }

Here is the call graph for this function:

Here is the caller graph for this function:

rocksdb::path_ ( ""  )

Definition at line 478 of file ldb_cmd.cc.

References rocksdb::LDBCommandExecuteResult::FAILED(), and verbose_().

Referenced by beast::ParsedURL::ParsedURL().

479 {
480  verbose_ = IsFlagPresent(flags, ARG_VERBOSE);
481 
482  map<string, string>::const_iterator itr = options.find(ARG_PATH);
483  if (itr != options.end()) {
484  path_ = itr->second;
485  if (path_.empty()) {
486  exec_state_ = LDBCommandExecuteResult::FAILED("--path: missing pathname");
487  }
488  }
489 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::PrefixScanInit ( DBTest *  dbtest)

Definition at line 4720 of file db_test.cc.

References ASSERT_OK, rocksdb::DBImpl::CompactRange(), rocksdb::DBTest::dbfull(), rocksdb::DBTest::Put(), and rocksdb::DBImpl::TEST_FlushMemTable().

Referenced by TEST().

4720  {
4721  char buf[100];
4722  std::string keystr;
4723  const int small_range_sstfiles = 5;
4724  const int big_range_sstfiles = 5;
4725 
4726  // Generate 11 sst files with the following prefix ranges.
4727  // GROUP 0: [0,10] (level 1)
4728  // GROUP 1: [1,2], [2,3], [3,4], [4,5], [5, 6] (level 0)
4729  // GROUP 2: [0,6], [0,7], [0,8], [0,9], [0,10] (level 0)
4730  //
4731  // A seek with the previous API would do 11 random I/Os (to all the
4732  // files). With the new API and a prefix filter enabled, we should
4733  // only do 2 random I/O, to the 2 files containing the key.
4734 
4735  // GROUP 0
4736  snprintf(buf, sizeof(buf), "%02d______:start", 0);
4737  keystr = std::string(buf);
4738  ASSERT_OK(dbtest->Put(keystr, keystr));
4739  snprintf(buf, sizeof(buf), "%02d______:end", 10);
4740  keystr = std::string(buf);
4741  ASSERT_OK(dbtest->Put(keystr, keystr));
4742  dbtest->dbfull()->TEST_FlushMemTable();
4743  dbtest->dbfull()->CompactRange(nullptr, nullptr); // move to level 1
4744 
4745  // GROUP 1
4746  for (int i = 1; i <= small_range_sstfiles; i++) {
4747  snprintf(buf, sizeof(buf), "%02d______:start", i);
4748  keystr = std::string(buf);
4749  ASSERT_OK(dbtest->Put(keystr, keystr));
4750  snprintf(buf, sizeof(buf), "%02d______:end", i+1);
4751  keystr = std::string(buf);
4752  ASSERT_OK(dbtest->Put(keystr, keystr));
4753  dbtest->dbfull()->TEST_FlushMemTable();
4754  }
4755 
4756  // GROUP 2
4757  for (int i = 1; i <= big_range_sstfiles; i++) {
4758  std::string keystr;
4759  snprintf(buf, sizeof(buf), "%02d______:start", 0);
4760  keystr = std::string(buf);
4761  ASSERT_OK(dbtest->Put(keystr, keystr));
4762  snprintf(buf, sizeof(buf), "%02d______:end",
4763  small_range_sstfiles+i+1);
4764  keystr = std::string(buf);
4765  ASSERT_OK(dbtest->Put(keystr, keystr));
4766  dbtest->dbfull()->TEST_FlushMemTable();
4767  }
4768 }

Here is the call graph for this function:

Here is the caller graph for this function:

rocksdb::print_header_ ( false  )

Referenced by print_values_().

Here is the caller graph for this function:

rocksdb::print_old_levels_ ( false  )

Definition at line 969 of file ldb_cmd.cc.

References rocksdb::LDBCommandExecuteResult::FAILED(), and new_levels_().

969  {
970 
971 
972  ParseIntOption(option_map_, ARG_NEW_LEVELS, new_levels_, exec_state_);
973  print_old_levels_ = IsFlagPresent(flags, ARG_PRINT_OLD_LEVELS);
974 
975  if(new_levels_ <= 0) {
976  exec_state_ = LDBCommandExecuteResult::FAILED(
977  " Use --" + ARG_NEW_LEVELS + " to specify a new level number\n");
978  }
979 }

Here is the call graph for this function:

rocksdb::print_stats_ ( false  )

Definition at line 763 of file ldb_cmd.cc.

References count_delim_(), count_only_(), delim_(), rocksdb::LDBCommandExecuteResult::FAILED(), HexToString(), max_keys_(), null_from_(), and null_to_().

Referenced by is_input_key_hex_().

763  {
764 
765  map<string, string>::const_iterator itr = options.find(ARG_FROM);
766  if (itr != options.end()) {
767  null_from_ = false;
768  from_ = itr->second;
769  }
770 
771  itr = options.find(ARG_TO);
772  if (itr != options.end()) {
773  null_to_ = false;
774  to_ = itr->second;
775  }
776 
777  itr = options.find(ARG_MAX_KEYS);
778  if (itr != options.end()) {
779  try {
780  max_keys_ = stoi(itr->second);
781  } catch(const invalid_argument&) {
782  exec_state_ = LDBCommandExecuteResult::FAILED(ARG_MAX_KEYS +
783  " has an invalid value");
784  } catch(const out_of_range&) {
785  exec_state_ = LDBCommandExecuteResult::FAILED(ARG_MAX_KEYS +
786  " has a value out-of-range");
787  }
788  }
789  itr = options.find(ARG_COUNT_DELIM);
790  if (itr != options.end()) {
791  delim_ = itr->second;
792  count_delim_ = true;
793  } else {
794  count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
795  delim_=".";
796  }
797 
798  print_stats_ = IsFlagPresent(flags, ARG_STATS);
799  count_only_ = IsFlagPresent(flags, ARG_COUNT_ONLY);
800 
801  if (is_key_hex_) {
802  if (!null_from_) {
803  from_ = HexToString(from_);
804  }
805  if (!null_to_) {
806  to_ = HexToString(to_);
807  }
808  }
809 }

Here is the call graph for this function:

Here is the caller graph for this function:

rocksdb::print_values_ ( false  )

Definition at line 1259 of file ldb_cmd.cc.

References rocksdb::LDBCommandExecuteResult::FAILED(), and print_header_().

1259  {
1260 
1261  wal_file_.clear();
1262 
1263  map<string, string>::const_iterator itr = options.find(ARG_WAL_FILE);
1264  if (itr != options.end()) {
1265  wal_file_ = itr->second;
1266  }
1267 
1268 
1269  print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER);
1270  print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE);
1271  if (wal_file_.empty()) {
1272  exec_state_ = LDBCommandExecuteResult::FAILED(
1273  "Argument " + ARG_WAL_FILE + " must be specified.");
1274  }
1275 }

Here is the call graph for this function:

void rocksdb::PrintBucketCounts ( const vector< uint64_t > &  bucket_counts,
int  ttl_start,
int  ttl_end,
int  bucket_size,
int  num_buckets 
)

Definition at line 572 of file ldb_cmd.cc.

References ReadableTime().

Referenced by rocksdb::DBDumperCommand::DoCommand().

573  {
574  int time_point = ttl_start;
575  for(int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
576  fprintf(stdout, "Keys in range %s to %s : %lu\n",
577  ReadableTime(time_point).c_str(),
578  ReadableTime(time_point + bucket_size).c_str(),
579  (unsigned long)bucket_counts[i]);
580  }
581  fprintf(stdout, "Keys in range %s to %s : %lu\n",
582  ReadableTime(time_point).c_str(),
583  ReadableTime(ttl_end).c_str(),
584  (unsigned long)bucket_counts[num_buckets - 1]);
585 }

Here is the call graph for this function:

Here is the caller graph for this function:

static std::string rocksdb::PrintContents ( WriteBatch *  b)
static

Definition at line 22 of file write_batch_test.cc.

References ASSERT_TRUE, BytewiseComparator(), rocksdb::WriteBatchInternal::Count(), rocksdb::WriteBatchInternal::InsertInto(), rocksdb::Iterator::key(), kTypeDeletion, kTypeLogData, kTypeMerge, kTypeValue, mem, rocksdb::Iterator::Next(), NumberToString(), rocksdb::Status::ok(), ParseInternalKey(), rocksdb::Iterator::SeekToFirst(), rocksdb::ParsedInternalKey::sequence, rocksdb::Slice::ToString(), rocksdb::Status::ToString(), rocksdb::ParsedInternalKey::type, rocksdb::ParsedInternalKey::user_key, rocksdb::Iterator::Valid(), and rocksdb::Iterator::value().

Referenced by TEST().

22  {
23  InternalKeyComparator cmp(BytewiseComparator());
24  auto factory = std::make_shared<SkipListFactory>();
25  MemTable* mem = new MemTable(cmp, factory);
26  mem->Ref();
27  std::string state;
28  Options options;
29  Status s = WriteBatchInternal::InsertInto(b, mem, &options);
30  int count = 0;
31  Iterator* iter = mem->NewIterator();
32  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
33  ParsedInternalKey ikey;
34  memset((void *)&ikey, 0, sizeof(ikey));
35  ASSERT_TRUE(ParseInternalKey(iter->key(), &ikey));
36  switch (ikey.type) {
37  case kTypeValue:
38  state.append("Put(");
39  state.append(ikey.user_key.ToString());
40  state.append(", ");
41  state.append(iter->value().ToString());
42  state.append(")");
43  count++;
44  break;
45  case kTypeMerge:
46  state.append("Merge(");
47  state.append(ikey.user_key.ToString());
48  state.append(", ");
49  state.append(iter->value().ToString());
50  state.append(")");
51  count++;
52  break;
53  case kTypeDeletion:
54  state.append("Delete(");
55  state.append(ikey.user_key.ToString());
56  state.append(")");
57  count++;
58  break;
59  case kTypeLogData:
60  assert(false);
61  break;
62  }
63  state.append("@");
64  state.append(NumberToString(ikey.sequence));
65  }
66  delete iter;
67  if (!s.ok()) {
68  state.append(s.ToString());
69  } else if (count != WriteBatchInternal::Count(b)) {
70  state.append("CountMismatch()");
71  }
72  mem->Unref();
73  return state;
74 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::ProfileKeyComparison ( )

Definition at line 167 of file perf_context_test.cc.

References rocksdb::HistogramImpl::Add(), db, DestroyDB(), FLAGS_random_key, FLAGS_total_keys, rocksdb::DBImpl::Get(), kDbName, OpenDb(), perf_context, rocksdb::DBImpl::Put(), rocksdb::PerfContext::Reset(), beast::IP::to_string(), rocksdb::HistogramImpl::ToString(), rocksdb::PerfContext::user_key_comparison_count, and value.

Referenced by TEST().

// Micro-benchmark: measures user-key comparison counts for Put and Get by
// inserting FLAGS_total_keys key/value pairs (optionally in shuffled order)
// into a fresh DB and reading each back, then printing two histograms.
// NOTE(review): this doc listing elides original lines 193/195/197/199 —
// per the References section those presumably reset perf_context and feed
// user_key_comparison_count into hist_put/hist_get; confirm in
// perf_context_test.cc.
// NOTE(review): the output strings below contain the typo "uesr" (should be
// "user") in the original source.
167  {
168  DestroyDB(kDbName, Options()); // Start this test with a fresh DB
169 
170  auto db = OpenDb();
171 
172  WriteOptions write_options;
173  ReadOptions read_options;
174 
175  HistogramImpl hist_put;
176  HistogramImpl hist_get;
177 
178  std::cout << "Inserting " << FLAGS_total_keys << " key/value pairs\n...\n";
179 
// Keys 0..FLAGS_total_keys-1; shuffled when FLAGS_random_key is set so the
// benchmark can compare sequential vs random insertion order.
180  std::vector<int> keys;
181  for (int i = 0; i < FLAGS_total_keys; ++i) {
182  keys.push_back(i);
183  }
184 
185  if (FLAGS_random_key) {
186  std::random_shuffle(keys.begin(), keys.end());
187  }
188 
189  for (const int i : keys) {
190  std::string key = "k" + std::to_string(i);
191  std::string value = "v" + std::to_string(i);
192 
194  db->Put(write_options, key, value);
196 
198  db->Get(read_options, key, &value);
200  }
201 
202  std::cout << "Put uesr key comparison: \n" << hist_put.ToString()
203  << "Get uesr key comparison: \n" << hist_get.ToString();
204 
205 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::PutFixed32 ( std::string *  dst,
uint32_t  value 
)

Definition at line 42 of file coding.cc.

References EncodeFixed32(), and value.

Referenced by rocksdb::SimpleTableBuilder::Add(), rocksdb::TestHashFilter::CreateFilter(), EncodeKey(), rocksdb::Footer::EncodeTo(), rocksdb::BlockBuilder::Finish(), rocksdb::FilterBlockBuilder::Finish(), TEST(), and rocksdb::Blob::ToString().

42  {
43  char buf[sizeof(value)];
44  EncodeFixed32(buf, value);
45  dst->append(buf, sizeof(buf));
46 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::PutFixed64 ( std::string *  dst,
uint64_t  value 
)

Definition at line 48 of file coding.cc.

References EncodeFixed64(), and value.

Referenced by rocksdb::SimpleTableBuilder::Add(), AppendInternalKey(), rocksdb::InternalKeyComparator::FindShortestSeparator(), rocksdb::InternalKeyComparator::FindShortSuccessor(), rocksdb::SimpleTableBuilder::Finish(), Key(), and TEST().

48  {
49  char buf[sizeof(value)];
50  EncodeFixed64(buf, value);
51  dst->append(buf, sizeof(buf));
52 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::PutLengthPrefixedSlice ( std::string *  dst,
const Slice &  value 
)

Definition at line 105 of file coding.cc.

References rocksdb::Slice::data(), PutVarint32(), and rocksdb::Slice::size().

Referenced by rocksdb::WriteBatch::Delete(), rocksdb::VersionEdit::EncodeTo(), rocksdb::WriteBatch::Merge(), rocksdb::WriteBatch::Put(), rocksdb::WriteBatch::PutLogData(), and TEST().

105  {
106  PutVarint32(dst, value.size());
107  dst->append(value.data(), value.size());
108 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::PutLengthPrefixedSliceParts ( std::string *  dst,
const SliceParts &  slice_parts 
)

Definition at line 110 of file coding.cc.

References rocksdb::Slice::data(), rocksdb::SliceParts::num_parts, rocksdb::SliceParts::parts, PutVarint32(), and rocksdb::Slice::size().

Referenced by rocksdb::WriteBatch::Put().

111  {
112  uint32_t total_bytes = 0;
113  for (int i = 0; i < slice_parts.num_parts; ++i) {
114  total_bytes += slice_parts.parts[i].size();
115  }
116  PutVarint32(dst, total_bytes);
117  for (int i = 0; i < slice_parts.num_parts; ++i) {
118  dst->append(slice_parts.parts[i].data(), slice_parts.parts[i].size());
119  }
120 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::PutVarint32 ( std::string *  dst,
uint32_t  v 
)

Definition at line 82 of file coding.cc.

References EncodeVarint32().

Referenced by rocksdb::BlockBuilder::Add(), EncodeKey(), rocksdb::VersionEdit::EncodeTo(), rocksdb::RegularKeysStartWithA::Finish(), PutLengthPrefixedSlice(), PutLengthPrefixedSliceParts(), and TEST().

82  {
83  char buf[5];
84  char* ptr = EncodeVarint32(buf, v);
85  dst->append(buf, ptr - buf);
86 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::PutVarint64 ( std::string *  dst,
uint64_t  v 
)

Definition at line 99 of file coding.cc.

References EncodeVarint64().

Referenced by rocksdb::BlockHandle::EncodeTo(), rocksdb::VersionEdit::EncodeTo(), rocksdb::InternalKeyPropertiesCollector::Finish(), and TEST().

99  {
100  char buf[10];
101  char* ptr = EncodeVarint64(buf, v);
102  dst->append(buf, ptr - buf);
103 }

Here is the call graph for this function:

Here is the caller graph for this function:

static std::string rocksdb::RandomKey ( Random *  rnd,
int  minimum = 0 
)
static

Definition at line 4532 of file db_test.cc.

References rocksdb::Random::OneIn(), rocksdb::test::RandomKey(), rocksdb::Random::Skewed(), and rocksdb::Random::Uniform().

Referenced by TEST().

4532  {
4533  int len;
4534  do {
4535  len = (rnd->OneIn(3)
4536  ? 1 // Short sometimes to encourage collisions
4537  : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
4538  } while (len < minimum);
4539  return test::RandomKey(rnd, len);
4540 }

Here is the call graph for this function:

Here is the caller graph for this function:

static std::string rocksdb::RandomString ( Random *  rnd,
int  len 
)
static

Definition at line 23 of file block_test.cc.

References rocksdb::test::RandomString().

23  {
24  std::string r;
25  test::RandomString(rnd, len, &r);
26  return r;
27 }

Here is the call graph for this function:

static std::string rocksdb::RandomString ( Random *  rnd,
int  len 
)
static

Definition at line 52 of file db_test.cc.

References rocksdb::test::RandomString().

Referenced by DataPumpThreadBody(), MinLevelHelper(), TEST(), and WorkerThreadBody().

52  {
53  std::string r;
54  test::RandomString(rnd, len, &r);
55  return r;
56 }

Here is the call graph for this function:

Here is the caller graph for this function:

static std::string rocksdb::RandomString ( Random *  rnd,
int  len 
)
static

Definition at line 749 of file simple_table_db_test.cc.

References rocksdb::test::RandomString().

749  {
750  std::string r;
751  test::RandomString(rnd, len, &r);
752  return r;
753 }

Here is the call graph for this function:

static std::string rocksdb::RandomString ( Random *  rnd,
int  len 
)
static

Definition at line 864 of file table_test.cc.

References rocksdb::test::RandomString().

864  {
865  std::string r;
866  test::RandomString(rnd, len, &r);
867  return r;
868 }

Here is the call graph for this function:

string rocksdb::ReadableTime ( int  unixtime)

Definition at line 554 of file ldb_cmd.cc.

Referenced by rocksdb::DBDumperCommand::DoCommand(), rocksdb::ScanCommand::DoCommand(), and PrintBucketCounts().

// Formats a unix timestamp as a human-readable local-time string using
// strftime's "%c" representation.
//
// Fixes over the original: `localtime` returns a pointer to shared static
// storage and is not thread-safe (and may return NULL for out-of-range
// inputs, which the original dereferenced). Use the reentrant localtime_r
// with a stack-allocated tm and check its result.
std::string ReadableTime(int unixtime) {
  char time_buffer[80];
  time_t rawtime = unixtime;
  struct tm timeinfo;
  if (localtime_r(&rawtime, &timeinfo) == nullptr) {
    return std::string();  // conversion failed; return empty rather than crash
  }
  strftime(time_buffer, sizeof(time_buffer), "%c", &timeinfo);
  return std::string(time_buffer);
}

Here is the caller graph for this function:

Status rocksdb::ReadBlockContents ( RandomAccessFile *  file,
const ReadOptions &  options,
const BlockHandle &  handle,
BlockContents *  result,
Env *  env,
bool  do_uncompress 
)

Definition at line 75 of file format.cc.

References rocksdb::PerfContext::block_checksum_time, rocksdb::PerfContext::block_decompress_time, rocksdb::PerfContext::block_read_byte, rocksdb::PerfContext::block_read_count, rocksdb::PerfContext::block_read_time, BumpPerfCount(), BumpPerfTime(), rocksdb::BlockContents::cachable, rocksdb::BlockContents::compression_type, rocksdb::Status::Corruption(), rocksdb::BlockContents::data, DecodeFixed32(), rocksdb::BlockContents::heap_allocated, kBlockTrailerSize, kNoCompression, rocksdb::BlockHandle::offset(), rocksdb::Status::OK(), rocksdb::Status::ok(), perf_context, rocksdb::RandomAccessFile::Read(), rocksdb::BlockHandle::size(), StartPerfTimer(), UncompressBlockContents(), rocksdb::crc32c::Unmask(), rocksdb::crc32c::Value(), and rocksdb::ReadOptions::verify_checksums.

Referenced by rocksdb::BlockBasedTable::ReadFilter(), and rocksdb::BlockBasedTable::ReadProperties().

// Reads one block (payload + 1-byte compression type + 4-byte crc trailer)
// from `file` at the location given by `handle` into *result, optionally
// verifying the checksum and decompressing.
// Ownership: on the uncompressed path, result->heap_allocated tells the
// caller whether result->data points at our heap buffer (true) or at
// file-owned memory (false, e.g. an mmap'd file).
// NOTE(review): this doc listing elides original lines 94, 96, 117 and 141 —
// per the References section these are presumably BumpPerfTime calls for
// block_read_time / block_checksum_time / block_decompress_time; confirm in
// format.cc.
 80  {
 81  result->data = Slice();
 82  result->cachable = false;
 83  result->heap_allocated = false;
 84 
 85  // Read the block contents as well as the type/crc footer.
 86  // See table_builder.cc for the code that built this structure.
 87  size_t n = static_cast<size_t>(handle.size());
 88  char* buf = new char[n + kBlockTrailerSize];
 89  Slice contents;
 90 
 91  StopWatchNano timer(env);
 92  StartPerfTimer(&timer);
 93  Status s = file->Read(handle.offset(), n + kBlockTrailerSize, &contents, buf);
 95  BumpPerfCount(&perf_context.block_read_byte, n + kBlockTrailerSize);
 97 
 98  if (!s.ok()) {
 99  delete[] buf;
100  return s;
101  }
// A short read means the file was truncated under us.
102  if (contents.size() != n + kBlockTrailerSize) {
103  delete[] buf;
104  return Status::Corruption("truncated block read");
105  }
106 
107  // Check the crc of the type and the block contents
108  const char* data = contents.data(); // Pointer to where Read put the data
109  if (options.verify_checksums) {
// The stored crc covers the n payload bytes plus the type byte (n + 1).
110  const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1));
111  const uint32_t actual = crc32c::Value(data, n + 1);
112  if (actual != crc) {
113  delete[] buf;
114  s = Status::Corruption("block checksum mismatch");
115  return s;
116  }
118  }
119 
120  // If the caller has requested that the block not be uncompressed
121  if (!do_uncompress || data[n] == kNoCompression) {
122  if (data != buf) {
123  // File implementation gave us pointer to some other data.
124  // Use it directly under the assumption that it will be live
125  // while the file is open.
126  delete[] buf;
127  result->data = Slice(data, n);
128  result->heap_allocated = false;
129  result->cachable = false; // Do not double-cache
130  } else {
131  result->data = Slice(buf, n);
132  result->heap_allocated = true;
133  result->cachable = true;
134  }
135  result->compression_type = (rocksdb::CompressionType)data[n];
136  s = Status::OK();
137  } else {
// Compressed: decompress into result (which allocates its own buffer),
// then the raw read buffer is no longer needed.
138  s = UncompressBlockContents(data, n, result);
139  delete[] buf;
140  }
142  return s;
143 }

Here is the call graph for this function:

Here is the caller graph for this function:

Status rocksdb::ReadFileToString ( Env *  env,
const std::string &  fname,
std::string *  data 
)

Definition at line 92 of file env.cc.

References rocksdb::Slice::data(), rocksdb::Slice::empty(), rocksdb::Env::NewSequentialFile(), rocksdb::Status::ok(), rocksdb::SequentialFile::Read(), and rocksdb::Slice::size().

Referenced by rocksdb::CorruptionTest::Corrupt(), rocksdb::Env::GenerateUniqueId(), and rocksdb::VersionSet::Recover().

92  {
93  EnvOptions soptions;
94  data->clear();
95  unique_ptr<SequentialFile> file;
96  Status s = env->NewSequentialFile(fname, &file, soptions);
97  if (!s.ok()) {
98  return s;
99  }
100  static const int kBufferSize = 8192;
101  char* space = new char[kBufferSize];
102  while (true) {
103  Slice fragment;
104  s = file->Read(kBufferSize, &fragment, space);
105  if (!s.ok()) {
106  break;
107  }
108  data->append(fragment.data(), fragment.size());
109  if (fragment.empty()) {
110  break;
111  }
112  }
113  delete[] space;
114  return s;
115 }

Here is the call graph for this function:

Here is the caller graph for this function:

SequenceNumber rocksdb::ReadRecords ( std::unique_ptr< TransactionLogIterator > &  iter,
int &  count 
)

Definition at line 3994 of file db_test.cc.

References ASSERT_OK, ASSERT_TRUE, rocksdb::TransactionLogIterator::GetBatch(), rocksdb::TransactionLogIterator::Next(), rocksdb::BatchResult::sequence, rocksdb::TransactionLogIterator::status(), and rocksdb::TransactionLogIterator::Valid().

Referenced by ExpectRecords(), and TEST().

3996  {
3997  count = 0;
3998  SequenceNumber lastSequence = 0;
3999  BatchResult res;
4000  while (iter->Valid()) {
4001  res = iter->GetBatch();
4002  ASSERT_TRUE(res.sequence > lastSequence);
4003  ++count;
4004  lastSequence = res.sequence;
4005  ASSERT_OK(iter->status());
4006  iter->Next();
4007  }
4008  return res.sequence;
4009 }

Here is the call graph for this function:

Here is the caller graph for this function:

Status rocksdb::RepairDB ( const std::string &  dbname,
const Options &  options 
)

Definition at line 385 of file repair.cc.

Referenced by leveldb_repair_db(), rocksdb::CorruptionTest::RepairDB(), and TEST().

385  {
386  Repairer repairer(dbname, options);
387  return repairer.Run();
388 }

Here is the caller graph for this function:

static void rocksdb::RunConcurrent ( int  run)
static

Definition at line 353 of file skiplist_test.cc.

References ConcurrentReader(), rocksdb::Env::Default(), rocksdb::TestState::DONE, rocksdb::TestState::quit_flag_, rocksdb::test::RandomSeed(), rocksdb::port::AtomicPointer::Release_Store(), rocksdb::TestState::RUNNING, rocksdb::Env::Schedule(), rocksdb::TestState::t_, rocksdb::TestState::Wait(), and rocksdb::ConcurrentTest::WriteStep().

Referenced by TEST().

353  {
354  const int seed = test::RandomSeed() + (run * 100);
355  Random rnd(seed);
356  const int N = 1000;
357  const int kSize = 1000;
358  for (int i = 0; i < N; i++) {
359  if ((i % 100) == 0) {
360  fprintf(stderr, "Run %d of %d\n", i, N);
361  }
362  TestState state(seed + 1);
363  Env::Default()->Schedule(ConcurrentReader, &state);
364  state.Wait(TestState::RUNNING);
365  for (int i = 0; i < kSize; i++) {
366  state.t_.WriteStep(&rnd);
367  }
368  state.quit_flag_.Release_Store(&state); // Any non-nullptr arg will do
369  state.Wait(TestState::DONE);
370  }
371 }

Here is the call graph for this function:

Here is the caller graph for this function:

Options rocksdb::SanitizeOptions ( const std::string &  dbname,
const InternalKeyComparator *  icmp,
const InternalFilterPolicy *  ipolicy,
const Options &  src 
)

Definition at line 120 of file db_impl.cc.

References rocksdb::Options::arena_block_size, rocksdb::Options::block_cache, rocksdb::Options::block_size, rocksdb::Options::block_size_deviation, ClipToRange(), rocksdb::Options::compaction_filter, rocksdb::Options::comparator, rocksdb::Options::compression_per_level, CreateLoggerFromOptions(), rocksdb::Options::db_log_dir, dbname, rocksdb::Options::env, rocksdb::Options::filter_policy, rocksdb::Options::hard_rate_limit, rocksdb::Options::info_log, Log(), rocksdb::Options::max_mem_compaction_level, rocksdb::Options::max_open_files, rocksdb::Options::max_write_buffer_number, rocksdb::Options::memtable_factory, ripple::min(), rocksdb::Options::min_write_buffer_number_to_merge, NewLRUCache(), rocksdb::Options::no_block_cache, rocksdb::Options::num_levels, rocksdb::Status::ok(), rocksdb::Options::prefix_extractor, rocksdb::Options::soft_rate_limit, rocksdb::Options::table_properties_collectors, rocksdb::Options::wal_dir, and rocksdb::Options::write_buffer_size.

Referenced by DestroyDB(), and TEST().

// Produces a copy of `src` with internal comparator/filter wrappers installed
// and every out-of-range or inconsistent option clamped or defaulted.
// The checks are order-dependent (e.g. info_log is created before it is used
// for the compaction-filter / prefix-extractor warnings), so statement order
// must be preserved.
123  {
124  Options result = src;
125  result.comparator = icmp;
126  result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
127  ClipToRange(&result.max_open_files, 20, 1000000);
128  ClipToRange(&result.write_buffer_size, ((size_t)64)<<10,
129  ((size_t)64)<<30);
130  ClipToRange(&result.block_size, 1<<10, 4<<20);
131 
132  // if user sets arena_block_size, we trust user to use this value. Otherwise,
133  // calculate a proper value from write_buffer_size;
134  if (result.arena_block_size <= 0) {
135  result.arena_block_size = result.write_buffer_size / 10;
136  }
137 
// There must always be at least one write buffer left un-merged.
138  result.min_write_buffer_number_to_merge = std::min(
139  result.min_write_buffer_number_to_merge, result.max_write_buffer_number-1);
140  if (result.info_log == nullptr) {
141  Status s = CreateLoggerFromOptions(dbname, result.db_log_dir, src.env,
142  result, &result.info_log);
143  if (!s.ok()) {
144  // No place suitable for logging
145  result.info_log = nullptr;
146  }
147  }
148  if (result.block_cache == nullptr && !result.no_block_cache) {
149  result.block_cache = NewLRUCache(8 << 20);
150  }
151  result.compression_per_level = src.compression_per_level;
152  if (result.block_size_deviation < 0 || result.block_size_deviation > 100) {
153  result.block_size_deviation = 0;
154  }
155  if (result.max_mem_compaction_level >= result.num_levels) {
156  result.max_mem_compaction_level = result.num_levels - 1;
157  }
158  if (result.soft_rate_limit > result.hard_rate_limit) {
159  result.soft_rate_limit = result.hard_rate_limit;
160  }
161  if (result.compaction_filter) {
162  Log(result.info_log, "Compaction filter specified, ignore factory");
163  }
164  if (result.prefix_extractor) {
165  // If a prefix extractor has been supplied and a PrefixHashRepFactory is
166  // being used, make sure that the latter uses the former as its transform
167  // function.
168  auto factory = dynamic_cast<PrefixHashRepFactory*>(
169  result.memtable_factory.get());
170  if (factory &&
171  factory->GetTransform() != result.prefix_extractor) {
172  Log(result.info_log, "A prefix hash representation factory was supplied "
173  "whose prefix extractor does not match options.prefix_extractor. "
174  "Falling back to skip list representation factory");
175  result.memtable_factory = std::make_shared<SkipListFactory>();
176  } else if (factory) {
177  Log(result.info_log, "Prefix hash memtable rep is in use.");
178  }
179  }
180 
181  if (result.wal_dir.empty()) {
182  // Use dbname as default
183  result.wal_dir = dbname;
184  }
185 
186  // -- Sanitize the table properties collector
187  // All user defined properties collectors will be wrapped by
188  // UserKeyTablePropertiesCollector since for them they only have the
189  // knowledge of the user keys; internal keys are invisible to them.
190  auto& collectors = result.table_properties_collectors;
191  for (size_t i = 0; i < result.table_properties_collectors.size(); ++i) {
192  assert(collectors[i]);
193  collectors[i] =
194  std::make_shared<UserKeyTablePropertiesCollector>(collectors[i]);
195  }
196 
197  // Add collector to collect internal key statistics
198  collectors.push_back(
199  std::make_shared<InternalKeyPropertiesCollector>()
200  );
201 
202  return result;
203 }

Here is the call graph for this function:

Here is the caller graph for this function:

bool rocksdb::SaveDidIO ( void *  arg,
const Slice &  key,
const Slice &  value,
bool  didIO 
)

Definition at line 1053 of file block_based_table_reader.cc.

References arg.

Referenced by rocksdb::BlockBasedTable::TEST_KeyInCache().

1053  {
1054  *reinterpret_cast<bool*>(arg) = didIO;
1055  return false;
1056 }

Here is the caller graph for this function:

static bool rocksdb::SaveValue ( void *  arg,
const Slice &  ikey,
const Slice &  v,
bool  didIO 
)
static

Definition at line 310 of file version_set.cc.

References arg, rocksdb::Slice::data(), didIO, kTypeDeletion, kTypeLogData, kTypeMerge, kTypeValue, NUMBER_MERGE_FAILURES, ParseInternalKey(), RecordTick(), rocksdb::Slice::size(), beast::asio::swap(), rocksdb::Slice::ToString(), rocksdb::ParsedInternalKey::type, and rocksdb::ParsedInternalKey::user_key.

Referenced by rocksdb::Version::Get().

// Callback invoked for each internal key encountered during a Version::Get
// lookup. `arg` is a Saver describing the key being looked up and carrying
// the result state machine (kNotFound -> kFound / kDeleted / kMerge /
// kCorrupt). Return value: true means "keep scanning older entries"
// (only the merge case needs more operands); false means "done".
310  {
311  Saver* s = reinterpret_cast<Saver*>(arg);
312  std::deque<std::string>* const ops = s->merge_operands; // shorter alias
313  std::string merge_result; // temporary area for merge results later
314 
315  assert(s != nullptr && ops != nullptr);
316 
317  ParsedInternalKey parsed_key;
318  // TODO: didIO and Merge?
319  s->didIO = didIO;
320  if (!ParseInternalKey(ikey, &parsed_key)) {
321  // TODO: what about corrupt during Merge?
322  s->state = kCorrupt;
323  } else {
324  if (s->ucmp->Compare(parsed_key.user_key, s->user_key) == 0) {
325  // Key matches. Process it
326  switch (parsed_key.type) {
// A plain value: either the direct answer, or — if merge operands were
// stacked on the way down — the base value for the final FullMerge.
327  case kTypeValue:
328  if (kNotFound == s->state) {
329  s->state = kFound;
330  s->value->assign(v.data(), v.size());
331  } else if (kMerge == s->state) {
332  assert(s->merge_operator != nullptr);
333  s->state = kFound;
334  if (!s->merge_operator->FullMerge(s->user_key, &v, *ops,
335  s->value, s->logger)) {
336  RecordTick(s->statistics, NUMBER_MERGE_FAILURES);
337  s->state = kCorrupt;
338  }
339  } else {
340  assert(false);
341  }
342  return false;
343 
// A deletion: the key is gone, or — in merge state — FullMerge with no
// base value (nullptr) resolves the stacked operands.
344  case kTypeDeletion:
345  if (kNotFound == s->state) {
346  s->state = kDeleted;
347  } else if (kMerge == s->state) {
348  s->state = kFound;
349  if (!s->merge_operator->FullMerge(s->user_key, nullptr, *ops,
350  s->value, s->logger)) {
351  RecordTick(s->statistics, NUMBER_MERGE_FAILURES);
352  s->state = kCorrupt;
353  }
354  } else {
355  assert(false);
356  }
357  return false;
358 
// A merge operand: push it (newest at the back via push_front ordering)
// and opportunistically collapse adjacent operands with PartialMerge.
359  case kTypeMerge:
360  assert(s->state == kNotFound || s->state == kMerge);
361  s->state = kMerge;
362  ops->push_front(v.ToString());
363  while (ops->size() >= 2) {
364  // Attempt to merge operands together via user associative merge
365  if (s->merge_operator->PartialMerge(s->user_key,
366  Slice((*ops)[0]),
367  Slice((*ops)[1]),
368  &merge_result,
369  s->logger)) {
370  ops->pop_front();
371  swap(ops->front(), merge_result);
372  } else {
373  // Associative merge returns false ==> stack the operands
374  break;
375  }
376  }
377  return true;
378 
// Log-data entries never appear in table lookups.
379  case kTypeLogData:
380  assert(false);
381  break;
382  }
383  }
384  }
385 
386  // s->state could be Corrupt, merge or notfound
387 
388  return false;
389 }

Here is the call graph for this function:

Here is the caller graph for this function:

static void rocksdb::SetBool ( void *  ptr)
static

Definition at line 34 of file env_test.cc.

Referenced by TEST().

34  {
35  reinterpret_cast<port::AtomicPointer*>(ptr)->NoBarrier_Store(ptr);
36 }

Here is the caller graph for this function:

Status rocksdb::SetCurrentFile ( Env *  env,
const std::string &  dbname,
uint64_t  descriptor_number 
)

Definition at line 233 of file filename.cc.

References CurrentFileName(), rocksdb::Env::DeleteFile(), DescriptorFileName(), rocksdb::Status::ok(), rocksdb::Slice::remove_prefix(), rocksdb::Env::RenameFile(), rocksdb::Slice::starts_with(), TempFileName(), rocksdb::Slice::ToString(), and WriteStringToFileSync().

Referenced by rocksdb::VersionSet::LogAndApply(), and rocksdb::DBImpl::NewDB().

234  {
235  // Remove leading "dbname/" and add newline to manifest file name
236  std::string manifest = DescriptorFileName(dbname, descriptor_number);
237  Slice contents = manifest;
238  assert(contents.starts_with(dbname + "/"));
239  contents.remove_prefix(dbname.size() + 1);
240  std::string tmp = TempFileName(dbname, descriptor_number);
241  Status s = WriteStringToFileSync(env, contents.ToString() + "\n", tmp);
242  if (s.ok()) {
243  s = env->RenameFile(tmp, CurrentFileName(dbname));
244  }
245  if (!s.ok()) {
246  env->DeleteFile(tmp);
247  }
248  return s;
249 }

Here is the call graph for this function:

Here is the caller graph for this function:

Status rocksdb::SetIdentityFile ( Env *  env,
const std::string &  dbname 
)

Definition at line 251 of file filename.cc.

References rocksdb::Env::DeleteFile(), rocksdb::Env::GenerateUniqueId(), IdentityFileName(), rocksdb::Status::ok(), rocksdb::Env::RenameFile(), TempFileName(), and WriteStringToFileSync().

Referenced by rocksdb::DBImpl::Recover().

251  {
252  std::string id = env->GenerateUniqueId();
253  assert(!id.empty());
254  // Reserve the filename dbname/000000.dbtmp for the temporary identity file
255  std::string tmp = TempFileName(dbname, 0);
256  Status s = WriteStringToFileSync(env, id, tmp);
257  if (s.ok()) {
258  s = env->RenameFile(tmp, IdentityFileName(dbname));
259  }
260  if (!s.ok()) {
261  env->DeleteFile(tmp);
262  }
263  return s;
264 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::SetPerfLevel ( PerfLevel  level)

Definition at line 13 of file perf_context.cc.

References perf_level.

Referenced by TEST().

13 { perf_level = level; }

Here is the caller graph for this function:

void rocksdb::SetTickerCount ( std::shared_ptr< Statistics >  statistics,
Tickers  ticker,
uint64_t  count 
)
inline

Definition at line 290 of file statistics.h.

References HISTOGRAM_ENUM_MAX, HistogramsNameMap, TICKER_ENUM_MAX, and TickersNameMap.

Referenced by rocksdb::DBImpl::Recover(), and rocksdb::DBImpl::Write().

292  {
293  assert(HistogramsNameMap.size() == HISTOGRAM_ENUM_MAX);
294  assert(TickersNameMap.size() == TICKER_ENUM_MAX);
295  if (statistics) {
296  statistics->setTickerCount(ticker, count);
297  }
298 }

Here is the caller graph for this function:

static std::string rocksdb::Shorten ( const std::string &  s,
const std::string &  l 
)
static

Definition at line 24 of file dbformat_test.cc.

References BytewiseComparator(), and rocksdb::InternalKeyComparator::FindShortestSeparator().

Referenced by TEST().

24  {
25  std::string result = s;
26  InternalKeyComparator(BytewiseComparator()).FindShortestSeparator(&result, l);
27  return result;
28 }

Here is the call graph for this function:

Here is the caller graph for this function:

static std::string rocksdb::ShortSuccessor ( const std::string &  s)
static

Definition at line 30 of file dbformat_test.cc.

References BytewiseComparator(), and rocksdb::InternalKeyComparator::FindShortSuccessor().

Referenced by TEST().

30  {
31  std::string result = s;
32  InternalKeyComparator(BytewiseComparator()).FindShortSuccessor(&result);
33  return result;
34 }

Here is the call graph for this function:

Here is the caller graph for this function:

const TestKey* rocksdb::SliceToTestKey ( const Slice &  slice)
inline

Definition at line 43 of file prefix_test.cc.

References rocksdb::Slice::data().

Referenced by rocksdb::TestKeyComparator::Compare(), and TEST().

43  {
44  return (const TestKey*)slice.data();
45 }

Here is the call graph for this function:

Here is the caller graph for this function:

static bool rocksdb::SnappyCompressionSupported ( const CompressionOptions &  options)
static

Definition at line 34 of file db_test.cc.

References rocksdb::Slice::data(), Json::in(), rocksdb::Slice::size(), and rocksdb::port::Snappy_Compress().

Referenced by GenerateArgList(), MinLevelToCompress(), and TEST().

34  {
35  std::string out;
36  Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
37  return port::Snappy_Compress(options, in.data(), in.size(), &out);
38 }

Here is the call graph for this function:

Here is the caller graph for this function:

static bool rocksdb::SnappyCompressionSupported ( )
static

Definition at line 452 of file table_test.cc.

References rocksdb::Slice::data(), Json::in(), rocksdb::Slice::size(), and rocksdb::port::Snappy_Compress().

452  {
453  std::string out;
454  Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
455  return port::Snappy_Compress(Options().compression_opts,
456  in.data(), in.size(),
457  &out);
458 }

Here is the call graph for this function:

bool rocksdb::SomeFileOverlapsRange ( const InternalKeyComparator &  icmp,
bool  disjoint_sorted_files,
const std::vector< FileMetaData * > &  files,
const Slice *  smallest_user_key,
const Slice *  largest_user_key 
)

Definition at line 95 of file version_set.cc.

References AfterFile(), BeforeFile(), rocksdb::InternalKey::Encode(), ripple::f, FindFile(), kMaxSequenceNumber, kValueTypeForSeek, ucmp, and rocksdb::InternalKeyComparator::user_comparator().

Referenced by rocksdb::Version::OverlapInLevel(), and rocksdb::FindFileTest::Overlaps().

100  {
101  const Comparator* ucmp = icmp.user_comparator();
102  if (!disjoint_sorted_files) {
103  // Need to check against all files
104  for (size_t i = 0; i < files.size(); i++) {
105  const FileMetaData* f = files[i];
106  if (AfterFile(ucmp, smallest_user_key, f) ||
107  BeforeFile(ucmp, largest_user_key, f)) {
108  // No overlap
109  } else {
110  return true; // Overlap
111  }
112  }
113  return false;
114  }
115 
116  // Binary search over file list
117  uint32_t index = 0;
118  if (smallest_user_key != nullptr) {
119  // Find the earliest possible internal key for smallest_user_key
120  InternalKey small(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
121  index = FindFile(icmp, files, small.Encode());
122  }
123 
124  if (index >= files.size()) {
125  // beginning of range is after all files, so no overlap.
126  return false;
127  }
128 
129  return !BeforeFile(ucmp, largest_user_key, files[index]);
130 }

Here is the call graph for this function:

Here is the caller graph for this function:

rocksdb::start_key_specified_ ( false  )

Referenced by max_keys_scanned_().

Here is the caller graph for this function:

void rocksdb::StartPerfTimer ( StopWatchNano *  timer)
inline

Definition at line 14 of file perf_context_imp.h.

References kEnableTime, perf_level, and rocksdb::StopWatchNano::Start().

Referenced by ReadBlockContents(), and rocksdb::DBImpl::Write().

// Starts `timer` for perf accounting.
// NOTE(review): this doc listing elides original line 15 — per the
// References section (perf_level, kEnableTime) the elided line is
// presumably a guard such as `if (perf_level >= kEnableTime) {`, so the
// timer only starts when timing is enabled; confirm in perf_context_imp.h.
 14  {
 16  timer->Start();
 17  }
 18 }

Here is the call graph for this function:

Here is the caller graph for this function:

std::vector< std::string > rocksdb::stringSplit ( string  arg,
char  delim 
)

Definition at line 17 of file string_util.cc.

Referenced by rocksdb::LDBCommand::InitFromCmdLineArgs(), and main().

// Splits `arg` on `delim` and returns the pieces in order. std::getline
// semantics apply: consecutive delimiters yield empty pieces; a trailing
// delimiter does not produce a trailing empty piece; an empty input yields
// an empty vector.
std::vector<std::string> stringSplit(std::string arg, char delim) {
  std::vector<std::string> pieces;
  std::stringstream stream(arg);
  for (std::string piece; std::getline(stream, piece, delim);) {
    pieces.push_back(piece);
  }
  return pieces;
}

Here is the caller graph for this function:

std::string rocksdb::TableFileName ( const std::string &  name,
uint64_t  number 
)

Definition at line 73 of file filename.cc.

References MakeFileName().

Referenced by BuildTable(), rocksdb::TableCache::FindTable(), rocksdb::DBImpl::GetLiveFiles(), rocksdb::VersionSet::GetLiveFilesMetaData(), rocksdb::DBImpl::OpenCompactionOutputFile(), rocksdb::DBImpl::PurgeObsoleteFiles(), and TEST().

73  {
74  assert(number > 0);
75  return MakeFileName(name, number, "sst");
76 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::TableReaderBenchmark ( Options &  opts,
EnvOptions &  env_options,
ReadOptions &  read_options,
int  num_keys1,
int  num_keys2,
int  num_iter,
int  prefix_len,
bool  if_query_empty_keys,
bool  for_iterator,
bool  through_db 
)

Definition at line 54 of file table_reader_bench.cc.

References rocksdb::TableBuilder::Add(), rocksdb::HistogramImpl::Add(), arg, ASSERT_OK, ASSERT_TRUE, rocksdb::WritableFile::Close(), db, dbname, rocksdb::Env::Default(), rocksdb::Env::DeleteFile(), DestroyDB(), DummySaveValue(), env, rocksdb::TableBuilder::Finish(), rocksdb::DB::Flush(), rocksdb::TableReader::Get(), rocksdb::DB::Get(), rocksdb::Env::GetFileSize(), rocksdb::Iterator::key(), kNoCompression, MakeKey(), rocksdb::TableReader::NewIterator(), rocksdb::DB::NewIterator(), rocksdb::Env::NewRandomAccessFile(), rocksdb::Env::NewWritableFile(), rocksdb::Iterator::Next(), rocksdb::Env::NowMicros(), rocksdb::DB::Open(), std::chrono::prefix, rocksdb::ReadOptions::prefix, rocksdb::DB::Put(), rocksdb::Iterator::Seek(), rocksdb::Options::table_factory, rocksdb::test::TmpDir(), rocksdb::HistogramImpl::ToString(), rocksdb::Random::Uniform(), and rocksdb::Iterator::Valid().

Referenced by main().

58  {
59  Slice prefix = Slice();
60 
61  std::string file_name = test::TmpDir()
62  + "/rocksdb_table_reader_benchmark";
63  std::string dbname = test::TmpDir() + "/rocksdb_table_reader_bench_db";
64  ReadOptions ro;
65  WriteOptions wo;
66  unique_ptr<WritableFile> file;
67  Env* env = Env::Default();
68  TableBuilder* tb = nullptr;
69  DB* db = nullptr;
70  Status s;
71  if (!through_db) {
72  env->NewWritableFile(file_name, &file, env_options);
73  tb = opts.table_factory->GetTableBuilder(opts, file.get(),
75  } else {
76  s = DB::Open(opts, dbname, &db);
77  ASSERT_OK(s);
78  ASSERT_TRUE(db != nullptr);
79  }
80  // Populate slightly more than 1M keys
81  for (int i = 0; i < num_keys1; i++) {
82  for (int j = 0; j < num_keys2; j++) {
83  std::string key = MakeKey(i * 2, j, through_db);
84  if (!through_db) {
85  tb->Add(key, key);
86  } else {
87  db->Put(wo, key, key);
88  }
89  }
90  }
91  if (!through_db) {
92  tb->Finish();
93  file->Close();
94  } else {
95  db->Flush(FlushOptions());
96  }
97 
98  unique_ptr<TableReader> table_reader;
99  unique_ptr<RandomAccessFile> raf;
100  if (!through_db) {
101  Status s = env->NewRandomAccessFile(file_name, &raf, env_options);
102  uint64_t file_size;
103  env->GetFileSize(file_name, &file_size);
104  s = opts.table_factory->GetTableReader(opts, env_options, std::move(raf),
105  file_size, &table_reader);
106  }
107 
108  Random rnd(301);
109  std::string result;
110  HistogramImpl hist;
111 
112  void* arg = nullptr;
113  for (int it = 0; it < num_iter; it++) {
114  for (int i = 0; i < num_keys1; i++) {
115  for (int j = 0; j < num_keys2; j++) {
116  int r1 = rnd.Uniform(num_keys1) * 2;
117  int r2 = rnd.Uniform(num_keys2);
118  if (if_query_empty_keys) {
119  r1++;
120  r2 = num_keys2 * 2 - r2;
121  }
122 
123  if (!for_iterator) {
124  // Query one existing key;
125  std::string key = MakeKey(r1, r2, through_db);
126  uint64_t start_micros = env->NowMicros();
127  port::MemoryBarrier();
128  if (!through_db) {
129  s = table_reader->Get(ro, key, arg, DummySaveValue, nullptr);
130  } else {
131  s = db->Get(ro, key, &result);
132  }
133  port::MemoryBarrier();
134  hist.Add(env->NowMicros() - start_micros);
135  } else {
136  int r2_len;
137  if (if_query_empty_keys) {
138  r2_len = 0;
139  } else {
140  r2_len = rnd.Uniform(num_keys2) + 1;
141  if (r2_len + r2 > num_keys2) {
142  r2_len = num_keys2 - r2;
143  }
144  }
145  std::string start_key = MakeKey(r1, r2, through_db);
146  std::string end_key = MakeKey(r1, r2 + r2_len, through_db);
147  if (prefix_len < 16) {
148  prefix = Slice(start_key.data(), prefix_len);
149  read_options.prefix = &prefix;
150  }
151  uint64_t total_time = 0;
152  uint64_t start_micros = env->NowMicros();
153  port::MemoryBarrier();
154  Iterator* iter;
155  if (!through_db) {
156  iter = table_reader->NewIterator(read_options);
157  } else {
158  iter = db->NewIterator(read_options);
159  }
160  int count = 0;
161  for(iter->Seek(start_key); iter->Valid(); iter->Next()) {
162  if (if_query_empty_keys) {
163  break;
164  }
165  // verify key;
166  port::MemoryBarrier();
167  total_time += env->NowMicros() - start_micros;
168  assert(Slice(MakeKey(r1, r2 + count, through_db)) == iter->key());
169  start_micros = env->NowMicros();
170  if (++count >= r2_len) {
171  break;
172  }
173  }
174  if (count != r2_len) {
175  fprintf(
176  stderr, "Iterator cannot iterate expected number of entries. "
177  "Expected %d but got %d\n", r2_len, count);
178  assert(false);
179  }
180  delete iter;
181  port::MemoryBarrier();
182  total_time += env->NowMicros() - start_micros;
183  hist.Add(total_time);
184  }
185  }
186  }
187  }
188 
189  fprintf(
190  stderr,
191  "==================================================="
192  "====================================================\n"
193  "InMemoryTableSimpleBenchmark: %20s num_key1: %5d "
194  "num_key2: %5d %10s\n"
195  "==================================================="
196  "===================================================="
197  "\nHistogram (unit: microseconds): \n%s",
198  opts.table_factory->Name(), num_keys1, num_keys2,
199  for_iterator? "iterator" : (if_query_empty_keys ? "empty" : "non_empty"),
200  hist.ToString().c_str());
201  if (!through_db) {
202  env->DeleteFile(file_name);
203  } else {
204  delete db;
205  db = nullptr;
206  DestroyDB(dbname, opts);
207  }
208 }

Here is the call graph for this function:

Here is the caller graph for this function:

std::string rocksdb::TempFileName ( const std::string &  dbname,
uint64_t  number 
)

Definition at line 94 of file filename.cc.

References MakeFileName().

Referenced by SetCurrentFile(), SetIdentityFile(), and TEST().

94  {
95  assert(number >= 0);
96  return MakeFileName(dbname, number, "dbtmp");
97 }

Here is the call graph for this function:

Here is the caller graph for this function:

rocksdb::TEST ( HistogramTest  ,
BasicOperation   
)

Definition at line 14 of file histogram_test.cc.

References rocksdb::HistogramImpl::Add(), ASSERT_EQ, ASSERT_GT, ASSERT_LE, ASSERT_TRUE, rocksdb::HistogramImpl::Average(), rocksdb::HistogramImpl::Median(), and rocksdb::HistogramImpl::Percentile().

14  {
15 
16  HistogramImpl histogram;
17  for (uint64_t i = 1; i <= 100; i++) {
18  histogram.Add(i);
19  }
20 
21  {
22  double median = histogram.Median();
23  // ASSERT_LE(median, 50);
24  ASSERT_GT(median, 0);
25  }
26 
27  {
28  double percentile100 = histogram.Percentile(100.0);
29  ASSERT_LE(percentile100, 100.0);
30  ASSERT_GT(percentile100, 0.0);
31  double percentile99 = histogram.Percentile(99.0);
32  double percentile85 = histogram.Percentile(85.0);
33  ASSERT_LE(percentile99, 99.0);
34  ASSERT_TRUE(percentile99 >= percentile85);
35  }
36 
 37  ASSERT_EQ(histogram.Average(), 50.5); // avg is accurately calculated.
38 }

Here is the call graph for this function:

rocksdb::TEST ( ArenaImplTest  ,
Empty   
)

Definition at line 18 of file arena_test.cc.

18  {
19  ArenaImpl arena0;
20 }
rocksdb::TEST ( Coding  ,
Fixed32   
)

Definition at line 18 of file coding_test.cc.

References ASSERT_EQ, DecodeFixed32(), and PutFixed32().

18  {
19  std::string s;
20  for (uint32_t v = 0; v < 100000; v++) {
21  PutFixed32(&s, v);
22  }
23 
24  const char* p = s.data();
25  for (uint32_t v = 0; v < 100000; v++) {
26  uint32_t actual = DecodeFixed32(p);
27  ASSERT_EQ(v, actual);
28  p += sizeof(uint32_t);
29  }
30 }

Here is the call graph for this function:

rocksdb::TEST ( BlobStoreTest  ,
RangeParseTest   
)

Definition at line 20 of file blob_store_test.cc.

References ASSERT_EQ, rocksdb::Blob::chunks, rand, and rocksdb::Blob::ToString().

20  {
21  Blob e;
22  for (int i = 0; i < 5; ++i) {
23  e.chunks.push_back(BlobChunk(rand(), rand(), rand()));
24  }
25  string x = e.ToString();
26  Blob nx(x);
27 
28  ASSERT_EQ(nx.ToString(), x);
29 }

Here is the call graph for this function:

rocksdb::TEST ( FileNameTest  ,
Parse   
)

Definition at line 21 of file filename_test.cc.

References ASSERT_EQ, ASSERT_TRUE, db, ripple::f, kCurrentFile, kDBLockFile, kDescriptorFile, kInfoLogFile, kLogFile, kMetaDatabase, kTableFile, and ParseFileName().

21  {
22  Slice db;
23  FileType type;
24  uint64_t number;
25 
26  // Successful parses
27  static struct {
28  const char* fname;
29  uint64_t number;
30  FileType type;
31  } cases[] = {
32  { "100.log", 100, kLogFile },
33  { "0.log", 0, kLogFile },
34  { "0.sst", 0, kTableFile },
35  { "CURRENT", 0, kCurrentFile },
36  { "LOCK", 0, kDBLockFile },
37  { "MANIFEST-2", 2, kDescriptorFile },
38  { "MANIFEST-7", 7, kDescriptorFile },
39  { "METADB-2", 2, kMetaDatabase },
40  { "METADB-7", 7, kMetaDatabase },
41  { "LOG", 0, kInfoLogFile },
42  { "LOG.old", 0, kInfoLogFile },
43  { "18446744073709551615.log", 18446744073709551615ull, kLogFile },
44  };
45  for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
46  std::string f = cases[i].fname;
47  ASSERT_TRUE(ParseFileName(f, &number, &type)) << f;
48  ASSERT_EQ(cases[i].type, type) << f;
49  ASSERT_EQ(cases[i].number, number) << f;
50  }
51 
52  // Errors
53  static const char* errors[] = {
54  "",
55  "foo",
56  "foo-dx-100.log",
57  ".log",
58  "",
59  "manifest",
60  "CURREN",
61  "CURRENTX",
62  "MANIFES",
63  "MANIFEST",
64  "MANIFEST-",
65  "XMANIFEST-3",
66  "MANIFEST-3x",
67  "META",
68  "METADB",
69  "METADB-",
70  "XMETADB-3",
71  "METADB-3x",
72  "LOC",
73  "LOCKx",
74  "LO",
75  "LOGx",
76  "18446744073709551616.log",
77  "184467440737095516150.log",
78  "100",
79  "100.",
80  "100.lop"
81  };
82  for (unsigned int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
83  std::string f = errors[i];
84  ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f;
85  };
86 }

Here is the call graph for this function:

rocksdb::TEST ( ArenaImplTest  ,
MemoryAllocatedBytes   
)

Definition at line 22 of file arena_test.cc.

References rocksdb::ArenaImpl::Allocate(), ASSERT_EQ, and rocksdb::ArenaImpl::MemoryAllocatedBytes().

22  {
23  const int N = 17;
24  size_t req_sz; //requested size
25  size_t bsz = 8192; // block size
26  size_t expected_memory_allocated;
27 
28  ArenaImpl arena_impl(bsz);
29 
30  // requested size > quarter of a block:
31  // allocate requested size separately
32  req_sz = 3001;
33  for (int i = 0; i < N; i++) {
34  arena_impl.Allocate(req_sz);
35  }
36  expected_memory_allocated = req_sz * N;
37  ASSERT_EQ(arena_impl.MemoryAllocatedBytes(), expected_memory_allocated);
38 
39  // requested size < quarter of a block:
40  // allocate a block with the default size, then try to use unused part
41  // of the block. So one new block will be allocated for the first
42  // Allocate(99) call. All the remaining calls won't lead to new allocation.
43  req_sz = 99;
44  for (int i = 0; i < N; i++) {
45  arena_impl.Allocate(req_sz);
46  }
47  expected_memory_allocated += bsz;
48  ASSERT_EQ(arena_impl.MemoryAllocatedBytes(), expected_memory_allocated);
49 
50  // requested size > quarter of a block:
51  // allocate requested size separately
52  req_sz = 99999999;
53  for (int i = 0; i < N; i++) {
54  arena_impl.Allocate(req_sz);
55  }
56  expected_memory_allocated += req_sz * N;
57  ASSERT_EQ(arena_impl.MemoryAllocatedBytes(), expected_memory_allocated);
58 }

Here is the call graph for this function:

rocksdb::TEST ( VersionEditTest  ,
EncodeDecode   
)

Definition at line 27 of file version_edit_test.cc.

References rocksdb::VersionEdit::AddFile(), rocksdb::VersionEdit::DeleteFile(), kTypeDeletion, kTypeValue, rocksdb::VersionEdit::SetCompactPointer(), rocksdb::VersionEdit::SetComparatorName(), rocksdb::VersionEdit::SetLastSequence(), rocksdb::VersionEdit::SetLogNumber(), rocksdb::VersionEdit::SetNextFile(), and TestEncodeDecode().

27  {
28  static const uint64_t kBig = 1ull << 50;
29 
30  VersionEdit edit(7);
31  for (int i = 0; i < 4; i++) {
32  TestEncodeDecode(edit);
33  edit.AddFile(3, kBig + 300 + i, kBig + 400 + i,
34  InternalKey("foo", kBig + 500 + i, kTypeValue),
35  InternalKey("zoo", kBig + 600 + i, kTypeDeletion),
36  kBig + 500 + i,
37  kBig + 600 + i);
38  edit.DeleteFile(4, kBig + 700 + i);
39  edit.SetCompactPointer(i, InternalKey("x", kBig + 900 + i, kTypeValue));
40  }
41 
42  edit.SetComparatorName("foo");
43  edit.SetLogNumber(kBig + 100);
44  edit.SetNextFile(kBig + 200);
45  edit.SetLastSequence(kBig + 1000);
46  TestEncodeDecode(edit);
47 }

Here is the call graph for this function:

rocksdb::TEST ( MemEnvTest  ,
Basics   
)

Definition at line 30 of file memenv_test.cc.

References rocksdb::WritableFile::Append(), ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, rocksdb::Env::CreateDir(), rocksdb::Env::DeleteDir(), rocksdb::Env::DeleteFile(), env_, rocksdb::Env::FileExists(), rocksdb::Env::GetChildren(), rocksdb::Env::GetFileSize(), rocksdb::Env::NewRandomAccessFile(), rocksdb::Env::NewSequentialFile(), rocksdb::Env::NewWritableFile(), rocksdb::Status::ok(), rocksdb::Env::RenameFile(), and soptions_.

30  {
31  uint64_t file_size;
32  unique_ptr<WritableFile> writable_file;
33  std::vector<std::string> children;
34 
35  ASSERT_OK(env_->CreateDir("/dir"));
36 
37  // Check that the directory is empty.
38  ASSERT_TRUE(!env_->FileExists("/dir/non_existent"));
39  ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok());
40  ASSERT_OK(env_->GetChildren("/dir", &children));
41  ASSERT_EQ(0U, children.size());
42 
43  // Create a file.
44  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
45  writable_file.reset();
46 
47  // Check that the file exists.
48  ASSERT_TRUE(env_->FileExists("/dir/f"));
49  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
50  ASSERT_EQ(0U, file_size);
51  ASSERT_OK(env_->GetChildren("/dir", &children));
52  ASSERT_EQ(1U, children.size());
53  ASSERT_EQ("f", children[0]);
54 
55  // Write to the file.
56  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
57  ASSERT_OK(writable_file->Append("abc"));
58  writable_file.reset();
59 
60  // Check for expected size.
61  ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
62  ASSERT_EQ(3U, file_size);
63 
64  // Check that renaming works.
65  ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
66  ASSERT_OK(env_->RenameFile("/dir/f", "/dir/g"));
67  ASSERT_TRUE(!env_->FileExists("/dir/f"));
68  ASSERT_TRUE(env_->FileExists("/dir/g"));
69  ASSERT_OK(env_->GetFileSize("/dir/g", &file_size));
70  ASSERT_EQ(3U, file_size);
71 
72  // Check that opening non-existent file fails.
73  unique_ptr<SequentialFile> seq_file;
74  unique_ptr<RandomAccessFile> rand_file;
75  ASSERT_TRUE(!env_->NewSequentialFile("/dir/non_existent", &seq_file,
76  soptions_).ok());
77  ASSERT_TRUE(!seq_file);
78  ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file,
79  soptions_).ok());
80  ASSERT_TRUE(!rand_file);
81 
82  // Check that deleting works.
83  ASSERT_TRUE(!env_->DeleteFile("/dir/non_existent").ok());
84  ASSERT_OK(env_->DeleteFile("/dir/g"));
85  ASSERT_TRUE(!env_->FileExists("/dir/g"));
86  ASSERT_OK(env_->GetChildren("/dir", &children));
87  ASSERT_EQ(0U, children.size());
88  ASSERT_OK(env_->DeleteDir("/dir"));
89 }

Here is the call graph for this function:

rocksdb::TEST ( BlockTest  ,
SimpleTest   
)

Definition at line 32 of file block_test.cc.

References rocksdb::BlockBuilder::Add(), ASSERT_EQ, ASSERT_TRUE, rocksdb::BlockContents::cachable, rocksdb::Options::comparator, rocksdb::BlockContents::data, rocksdb::BlockBuilder::Finish(), rocksdb::BlockContents::heap_allocated, rocksdb::Iterator::key(), rocksdb::Block::NewIterator(), rocksdb::Iterator::Next(), RandomString(), rocksdb::Iterator::Seek(), rocksdb::Iterator::SeekToFirst(), rocksdb::Slice::ToString(), rocksdb::Random::Uniform(), rocksdb::Iterator::Valid(), rocksdb::Iterator::value(), and value.

32  {
33  Random rnd(301);
34  Options options = Options();
35  std::vector<std::string> keys;
36  std::vector<std::string> values;
37  BlockBuilder builder(options);
38  int num_records = 100000;
39  char buf[10];
40  char* p = &buf[0];
41 
42  // add a bunch of records to a block
43  for (int i = 0; i < num_records; i++) {
44  // generate random kvs
45  sprintf(p, "%6d", i);
46  std::string k(p);
47  std::string v = RandomString(&rnd, 100); // 100 byte values
48 
49  // write kvs to the block
50  Slice key(k);
51  Slice value(v);
52  builder.Add(key, value);
53 
54  // remember kvs in a lookaside array
55  keys.push_back(k);
56  values.push_back(v);
57  }
58 
59  // read serialized contents of the block
60  Slice rawblock = builder.Finish();
61 
62  // create block reader
63  BlockContents contents;
64  contents.data = rawblock;
65  contents.cachable = false;
66  contents.heap_allocated = false;
67  Block reader(contents);
68 
69  // read contents of block sequentially
70  int count = 0;
71  Iterator* iter = reader.NewIterator(options.comparator);
72  for (iter->SeekToFirst();iter->Valid(); count++, iter->Next()) {
73 
74  // read kv from block
75  Slice k = iter->key();
76  Slice v = iter->value();
77 
78  // compare with lookaside array
79  ASSERT_EQ(k.ToString().compare(keys[count]), 0);
80  ASSERT_EQ(v.ToString().compare(values[count]), 0);
81  }
82  delete iter;
83 
84  // read block contents randomly
85  iter = reader.NewIterator(options.comparator);
86  for (int i = 0; i < num_records; i++) {
87 
88  // find a random key in the lookaside array
89  int index = rnd.Uniform(num_records);
90  Slice k(keys[index]);
91 
92  // search in block for this key
93  iter->Seek(k);
94  ASSERT_TRUE(iter->Valid());
95  Slice v = iter->value();
96  ASSERT_EQ(v.ToString().compare(values[index]), 0);
97  }
98  delete iter;
99 }

Here is the call graph for this function:

rocksdb::TEST ( BlobStoreTest  ,
SanityTest   
)

Definition at line 32 of file blob_store_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Blob::chunks, rocksdb::Env::Default(), rocksdb::BlobStore::Delete(), rocksdb::BlobStore::Put(), rocksdb::test::RandomString(), and rocksdb::test::TmpDir().

32  {
33  const uint64_t block_size = 10;
34  const uint32_t blocks_per_file = 20;
35  Random random(5);
36 
37  BlobStore blob_store(test::TmpDir() + "/blob_store_test",
38  block_size,
39  blocks_per_file,
40  1000,
41  Env::Default());
42 
43  string buf;
44 
45  // put string of size 170
46  test::RandomString(&random, 170, &buf);
47  Blob r1;
48  ASSERT_OK(blob_store.Put(Slice(buf), &r1));
49  // use the first file
50  for (size_t i = 0; i < r1.chunks.size(); ++i) {
51  ASSERT_EQ(r1.chunks[0].bucket_id, 0u);
52  }
53 
54  // put string of size 30
55  test::RandomString(&random, 30, &buf);
56  Blob r2;
57  ASSERT_OK(blob_store.Put(Slice(buf), &r2));
58  // use the first file
59  for (size_t i = 0; i < r2.chunks.size(); ++i) {
60  ASSERT_EQ(r2.chunks[0].bucket_id, 0u);
61  }
62 
63  // delete blob of size 170
64  ASSERT_OK(blob_store.Delete(r1));
65 
66  // put a string of size 100
67  test::RandomString(&random, 100, &buf);
68  Blob r3;
69  ASSERT_OK(blob_store.Put(Slice(buf), &r3));
70  // use the first file
71  for (size_t i = 0; i < r3.chunks.size(); ++i) {
72  ASSERT_EQ(r3.chunks[0].bucket_id, 0u);
73  }
74 
75  // put a string of size 70
76  test::RandomString(&random, 70, &buf);
77  Blob r4;
78  ASSERT_OK(blob_store.Put(Slice(buf), &r4));
79  // use the first file
80  for (size_t i = 0; i < r4.chunks.size(); ++i) {
81  ASSERT_EQ(r4.chunks[0].bucket_id, 0u);
82  }
83 
84  // put a string of size 5
85  test::RandomString(&random, 5, &buf);
86  Blob r5;
87  ASSERT_OK(blob_store.Put(Slice(buf), &r5));
88  // now you get to use the second file
89  for (size_t i = 0; i < r5.chunks.size(); ++i) {
90  ASSERT_EQ(r5.chunks[0].bucket_id, 1u);
91  }
92 }

Here is the call graph for this function:

rocksdb::TEST ( Coding  ,
Fixed64   
)

Definition at line 32 of file coding_test.cc.

References ASSERT_EQ, DecodeFixed64(), and PutFixed64().

32  {
33  std::string s;
34  for (int power = 0; power <= 63; power++) {
35  uint64_t v = static_cast<uint64_t>(1) << power;
36  PutFixed64(&s, v - 1);
37  PutFixed64(&s, v + 0);
38  PutFixed64(&s, v + 1);
39  }
40 
41  const char* p = s.data();
42  for (int power = 0; power <= 63; power++) {
43  uint64_t v = static_cast<uint64_t>(1) << power;
44  uint64_t actual;
45  actual = DecodeFixed64(p);
46  ASSERT_EQ(v-1, actual);
47  p += sizeof(uint64_t);
48 
49  actual = DecodeFixed64(p);
50  ASSERT_EQ(v+0, actual);
51  p += sizeof(uint64_t);
52 
53  actual = DecodeFixed64(p);
54  ASSERT_EQ(v+1, actual);
55  p += sizeof(uint64_t);
56  }
57 }

Here is the call graph for this function:

rocksdb::TEST ( SkipTest  ,
Empty   
)

Definition at line 36 of file skiplist_test.cc.

References ASSERT_TRUE, and rocksdb::SkipList< Key, Comparator >::Contains().

36  {
37  ArenaImpl arena_impl;
38  TestComparator cmp;
39  SkipList<Key, TestComparator> list(cmp, &arena_impl);
40  ASSERT_TRUE(!list.Contains(10));
41 
42  SkipList<Key, TestComparator>::Iterator iter(&list);
43  ASSERT_TRUE(!iter.Valid());
44  iter.SeekToFirst();
45  ASSERT_TRUE(!iter.Valid());
46  iter.Seek(100);
47  ASSERT_TRUE(!iter.Valid());
48  iter.SeekToLast();
49  ASSERT_TRUE(!iter.Valid());
50 }

Here is the call graph for this function:

rocksdb::TEST ( EnvPosixTest  ,
RunImmediately   
)

Definition at line 38 of file env_test.cc.

References ASSERT_TRUE, rocksdb::Env::Default(), env_, kDelayMicros, rocksdb::port::AtomicPointer::NoBarrier_Load(), rocksdb::Env::Schedule(), SetBool(), and rocksdb::Env::SleepForMicroseconds().

38  {
39  port::AtomicPointer called (nullptr);
40  env_->Schedule(&SetBool, &called);
41  Env::Default()->SleepForMicroseconds(kDelayMicros);
42  ASSERT_TRUE(called.NoBarrier_Load() != nullptr);
43 }

Here is the call graph for this function:

rocksdb::TEST ( LockTest  ,
LockBySameThread   
)

Definition at line 39 of file filelock_test.cc.

References ASSERT_OK, and ASSERT_TRUE.

39  {
40  FileLock* lock1;
41  FileLock* lock2;
42 
43  // acquire a lock on a file
44  ASSERT_OK(LockFile(&lock1));
45 
46  // re-acquire the lock on the same file. This should fail.
47  ASSERT_TRUE(LockFile(&lock2).IsIOError());
48 
49  // release the lock
50  ASSERT_OK(UnlockFile(lock1));
51 
52 }
rocksdb::TEST ( HistogramTest  ,
EmptyHistogram   
)

Definition at line 40 of file histogram_test.cc.

References ASSERT_EQ, rocksdb::HistogramImpl::Average(), rocksdb::HistogramImpl::Median(), and rocksdb::HistogramImpl::Percentile().

40  {
41  HistogramImpl histogram;
42  ASSERT_EQ(histogram.Median(), 0.0);
43  ASSERT_EQ(histogram.Percentile(85.0), 0.0);
44  ASSERT_EQ(histogram.Average(), 0.0);
45 }

Here is the call graph for this function:

rocksdb::TEST ( EnvPosixTest  ,
RunMany   
)

Definition at line 45 of file env_test.cc.

References rocksdb::port::AtomicPointer::Acquire_Load(), ASSERT_EQ, rocksdb::Env::Default(), env_, id, kDelayMicros, rocksdb::Env::Schedule(), and rocksdb::Env::SleepForMicroseconds().

45  {
46  port::AtomicPointer last_id (nullptr);
47 
48  struct CB {
49  port::AtomicPointer* last_id_ptr; // Pointer to shared slot
50  uintptr_t id; // Order# for the execution of this callback
51 
52  CB(port::AtomicPointer* p, int i) : last_id_ptr(p), id(i) { }
53 
54  static void Run(void* v) {
55  CB* cb = reinterpret_cast<CB*>(v);
56  void* cur = cb->last_id_ptr->NoBarrier_Load();
57  ASSERT_EQ(cb->id-1, reinterpret_cast<uintptr_t>(cur));
58  cb->last_id_ptr->Release_Store(reinterpret_cast<void*>(cb->id));
59  }
60  };
61 
62  // Schedule in different order than start time
63  CB cb1(&last_id, 1);
64  CB cb2(&last_id, 2);
65  CB cb3(&last_id, 3);
66  CB cb4(&last_id, 4);
67  env_->Schedule(&CB::Run, &cb1);
68  env_->Schedule(&CB::Run, &cb2);
69  env_->Schedule(&CB::Run, &cb3);
70  env_->Schedule(&CB::Run, &cb4);
71 
72  Env::Default()->SleepForMicroseconds(kDelayMicros);
73  void* cur = last_id.Acquire_Load();
74  ASSERT_EQ(4U, reinterpret_cast<uintptr_t>(cur));
75 }

Here is the call graph for this function:

rocksdb::TEST ( HistogramTest  ,
ClearHistogram   
)

Definition at line 47 of file histogram_test.cc.

References rocksdb::HistogramImpl::Add(), ASSERT_EQ, rocksdb::HistogramImpl::Average(), rocksdb::HistogramImpl::Clear(), rocksdb::HistogramImpl::Median(), and rocksdb::HistogramImpl::Percentile().

47  {
48  HistogramImpl histogram;
49  for (uint64_t i = 1; i <= 100; i++) {
50  histogram.Add(i);
51  }
52  histogram.Clear();
53  ASSERT_EQ(histogram.Median(), 0);
54  ASSERT_EQ(histogram.Percentile(85.0), 0);
55  ASSERT_EQ(histogram.Average(), 0);
56 }

Here is the call graph for this function:

rocksdb::TEST ( PerfContextTest  ,
SeekIntoDeletion   
)

Definition at line 52 of file perf_context_test.cc.

References rocksdb::HistogramImpl::Add(), ASSERT_TRUE, db, rocksdb::Env::Default(), rocksdb::DBImpl::Delete(), DestroyDB(), rocksdb::StopWatchNano::ElapsedNanos(), FLAGS_total_keys, rocksdb::DBImpl::Get(), rocksdb::PerfContext::internal_delete_skipped_count, rocksdb::PerfContext::internal_key_skipped_count, kDbName, rocksdb::DBImpl::NewIterator(), OpenDb(), perf_context, rocksdb::DBImpl::Put(), rocksdb::PerfContext::Reset(), beast::IP::to_string(), rocksdb::HistogramImpl::ToString(), rocksdb::PerfContext::user_key_comparison_count, and value.

52  {
53  DestroyDB(kDbName, Options());
54  auto db = OpenDb();
55  WriteOptions write_options;
56  ReadOptions read_options;
57 
58  for (int i = 0; i < FLAGS_total_keys; ++i) {
59  std::string key = "k" + std::to_string(i);
60  std::string value = "v" + std::to_string(i);
61 
62  db->Put(write_options, key, value);
63  }
64 
65  for (int i = 0; i < FLAGS_total_keys -1 ; ++i) {
66  std::string key = "k" + std::to_string(i);
67  db->Delete(write_options, key);
68  }
69 
70  HistogramImpl hist_get;
71  HistogramImpl hist_get_time;
72  for (int i = 0; i < FLAGS_total_keys - 1; ++i) {
73  std::string key = "k" + std::to_string(i);
74  std::string value;
75 
77  StopWatchNano timer(Env::Default(), true);
78  auto status = db->Get(read_options, key, &value);
79  auto elapsed_nanos = timer.ElapsedNanos();
80  ASSERT_TRUE(status.IsNotFound());
82  hist_get_time.Add(elapsed_nanos);
83  }
84 
 85  std::cout << "Get user key comparison: \n" << hist_get.ToString()
86  << "Get time: \n" << hist_get_time.ToString();
87 
88  HistogramImpl hist_seek_to_first;
89  std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
90 
92  StopWatchNano timer(Env::Default(), true);
93  iter->SeekToFirst();
94  hist_seek_to_first.Add(perf_context.user_key_comparison_count);
95  auto elapsed_nanos = timer.ElapsedNanos();
96 
 97  std::cout << "SeekToFirst user key comparison: \n" << hist_seek_to_first.ToString()
98  << "ikey skipped: " << perf_context.internal_key_skipped_count << "\n"
99  << "idelete skipped: " << perf_context.internal_delete_skipped_count << "\n"
100  << "elapsed: " << elapsed_nanos << "\n";
101 
102  HistogramImpl hist_seek;
103  for (int i = 0; i < FLAGS_total_keys; ++i) {
104  std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
105  std::string key = "k" + std::to_string(i);
106 
108  StopWatchNano timer(Env::Default(), true);
109  iter->Seek(key);
110  auto elapsed_nanos = timer.ElapsedNanos();
112  std::cout << "seek cmp: " << perf_context.user_key_comparison_count
113  << " ikey skipped " << perf_context.internal_key_skipped_count
114  << " idelete skipped " << perf_context.internal_delete_skipped_count
115  << " elapsed: " << elapsed_nanos << "ns\n";
116 
118  ASSERT_TRUE(iter->Valid());
119  StopWatchNano timer2(Env::Default(), true);
120  iter->Next();
121  auto elapsed_nanos2 = timer2.ElapsedNanos();
122  std::cout << "next cmp: " << perf_context.user_key_comparison_count
123  << "elapsed: " << elapsed_nanos2 << "ns\n";
124  }
125 
126  std::cout << "Seek user key comparison: \n" << hist_seek.ToString();
127 }

Here is the call graph for this function:

rocksdb::TEST ( SkipTest  ,
InsertAndLookup   
)

Definition at line 52 of file skiplist_test.cc.

References ASSERT_EQ, ASSERT_TRUE, rocksdb::SkipList< Key, Comparator >::Contains(), rocksdb::SkipList< Key, Comparator >::Insert(), rocksdb::SkipList< Key, Comparator >::Iterator::key(), rocksdb::Random::Next(), rocksdb::SkipList< Key, Comparator >::Iterator::Next(), rocksdb::SkipList< Key, Comparator >::Iterator::Prev(), R, rocksdb::SkipList< Key, Comparator >::Iterator::Seek(), rocksdb::SkipList< Key, Comparator >::Iterator::SeekToFirst(), rocksdb::SkipList< Key, Comparator >::Iterator::SeekToLast(), and rocksdb::SkipList< Key, Comparator >::Iterator::Valid().

52  {
53  const int N = 2000;
54  const int R = 5000;
55  Random rnd(1000);
56  std::set<Key> keys;
57  ArenaImpl arena_impl;
58  TestComparator cmp;
59  SkipList<Key, TestComparator> list(cmp, &arena_impl);
60  for (int i = 0; i < N; i++) {
61  Key key = rnd.Next() % R;
62  if (keys.insert(key).second) {
63  list.Insert(key);
64  }
65  }
66 
67  for (int i = 0; i < R; i++) {
68  if (list.Contains(i)) {
69  ASSERT_EQ(keys.count(i), 1U);
70  } else {
71  ASSERT_EQ(keys.count(i), 0U);
72  }
73  }
74 
75  // Simple iterator tests
76  {
77  SkipList<Key, TestComparator>::Iterator iter(&list);
78  ASSERT_TRUE(!iter.Valid());
79 
80  iter.Seek(0);
81  ASSERT_TRUE(iter.Valid());
82  ASSERT_EQ(*(keys.begin()), iter.key());
83 
84  iter.SeekToFirst();
85  ASSERT_TRUE(iter.Valid());
86  ASSERT_EQ(*(keys.begin()), iter.key());
87 
88  iter.SeekToLast();
89  ASSERT_TRUE(iter.Valid());
90  ASSERT_EQ(*(keys.rbegin()), iter.key());
91  }
92 
93  // Forward iteration test
94  for (int i = 0; i < R; i++) {
95  SkipList<Key, TestComparator>::Iterator iter(&list);
96  iter.Seek(i);
97 
98  // Compare against model iterator
99  std::set<Key>::iterator model_iter = keys.lower_bound(i);
100  for (int j = 0; j < 3; j++) {
101  if (model_iter == keys.end()) {
102  ASSERT_TRUE(!iter.Valid());
103  break;
104  } else {
105  ASSERT_TRUE(iter.Valid());
106  ASSERT_EQ(*model_iter, iter.key());
107  ++model_iter;
108  iter.Next();
109  }
110  }
111  }
112 
113  // Backward iteration test
114  {
115  SkipList<Key, TestComparator>::Iterator iter(&list);
116  iter.SeekToLast();
117 
118  // Compare against model iterator
119  for (std::set<Key>::reverse_iterator model_iter = keys.rbegin();
120  model_iter != keys.rend();
121  ++model_iter) {
122  ASSERT_TRUE(iter.Valid());
123  ASSERT_EQ(*model_iter, iter.key());
124  iter.Prev();
125  }
126  ASSERT_TRUE(!iter.Valid());
127  }
128 }

Here is the call graph for this function:

rocksdb::TEST ( RedisListsTest  ,
SimpleTest   
)

Definition at line 53 of file redis_lists_test.cc.

References ASSERT_EQ, ASSERT_TRUE, AssertListEq(), rocksdb::RedisLists::Index(), rocksdb::RedisLists::Length(), rocksdb::RedisLists::PushRight(), and rocksdb::RedisLists::Range().

53  {
54  RedisLists redis(kDefaultDbName, options, true); // Destructive
55 
56  string tempv; // Used below for all Index(), PopRight(), PopLeft()
57 
58  // Simple PushRight (should return the new length each time)
59  ASSERT_EQ(redis.PushRight("k1", "v1"), 1);
60  ASSERT_EQ(redis.PushRight("k1", "v2"), 2);
61  ASSERT_EQ(redis.PushRight("k1", "v3"), 3);
62 
63  // Check Length and Index() functions
64  ASSERT_EQ(redis.Length("k1"), 3); // Check length
65  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
66  ASSERT_EQ(tempv, "v1"); // Check valid indices
67  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
68  ASSERT_EQ(tempv, "v2");
69  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
70  ASSERT_EQ(tempv, "v3");
71 
72  // Check range function and vectors
73  std::vector<std::string> result = redis.Range("k1", 0, 2); // Get the list
74  std::vector<std::string> expected_result(3);
75  expected_result[0] = "v1";
76  expected_result[1] = "v2";
77  expected_result[2] = "v3";
78  AssertListEq(result, expected_result);
79 }

Here is the call graph for this function:

rocksdb::TEST ( FormatTest  ,
InternalKey_EncodeDecode   
)

Definition at line 54 of file dbformat_test.cc.

References kTypeDeletion, kTypeValue, and TestKey().

54  {
55  const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" };
56  const uint64_t seq[] = {
57  1, 2, 3,
58  (1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1,
59  (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1,
60  (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1
61  };
62  for (unsigned int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
63  for (unsigned int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
64  TestKey(keys[k], seq[s], kTypeValue);
65  TestKey("hello", 1, kTypeDeletion);
66  }
67  }
68 }

Here is the call graph for this function:

rocksdb::TEST ( FindFileTest  ,
Empty   
)

Definition at line 56 of file version_set_test.cc.

References ASSERT_EQ, and ASSERT_TRUE.

56  {
57  ASSERT_EQ(0, Find("foo"));
58  ASSERT_TRUE(! Overlaps("a", "z"));
59  ASSERT_TRUE(! Overlaps(nullptr, "z"));
60  ASSERT_TRUE(! Overlaps("a", nullptr));
61  ASSERT_TRUE(! Overlaps(nullptr, nullptr));
62 }
rocksdb::TEST ( FilterBlockTest  ,
EmptyBuilder   
)

Definition at line 57 of file filter_block_test.cc.

References ASSERT_EQ, ASSERT_TRUE, EscapeString(), rocksdb::FilterBlockBuilder::Finish(), rocksdb::FilterBlockReader::KeyMayMatch(), and options_.

57  {
58  FilterBlockBuilder builder(options_);
59  Slice block = builder.Finish();
60  ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block));
61  FilterBlockReader reader(options_, block);
62  ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
63  ASSERT_TRUE(reader.KeyMayMatch(100000, "foo"));
64 }

Here is the call graph for this function:

rocksdb::TEST ( ArenaImplTest  ,
Simple   
)

Definition at line 60 of file arena_test.cc.

References rocksdb::ArenaImpl::Allocate(), rocksdb::ArenaImpl::AllocateAligned(), rocksdb::ArenaImpl::ApproximateMemoryUsage(), ASSERT_EQ, ASSERT_GE, ASSERT_LE, rocksdb::Random::OneIn(), and rocksdb::Random::Uniform().

60  {
61  std::vector<std::pair<size_t, char*> > allocated;
62  ArenaImpl arena_impl;
63  const int N = 100000;
64  size_t bytes = 0;
65  Random rnd(301);
66  for (int i = 0; i < N; i++) {
67  size_t s;
68  if (i % (N / 10) == 0) {
69  s = i;
70  } else {
71  s = rnd.OneIn(4000) ? rnd.Uniform(6000) :
72  (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
73  }
74  if (s == 0) {
75  // Our arena disallows size 0 allocations.
76  s = 1;
77  }
78  char* r;
79  if (rnd.OneIn(10)) {
80  r = arena_impl.AllocateAligned(s);
81  } else {
82  r = arena_impl.Allocate(s);
83  }
84 
85  for (unsigned int b = 0; b < s; b++) {
86  // Fill the "i"th allocation with a known bit pattern
87  r[b] = i % 256;
88  }
89  bytes += s;
90  allocated.push_back(std::make_pair(s, r));
91  ASSERT_GE(arena_impl.ApproximateMemoryUsage(), bytes);
92  if (i > N/10) {
93  ASSERT_LE(arena_impl.ApproximateMemoryUsage(), bytes * 1.10);
94  }
95  }
96  for (unsigned int i = 0; i < allocated.size(); i++) {
97  size_t num_bytes = allocated[i].first;
98  const char* p = allocated[i].second;
99  for (unsigned int b = 0; b < num_bytes; b++) {
100  // Check the "i"th allocation for the known bit pattern
101  ASSERT_EQ(int(p[b]) & 0xff, (int)(i % 256));
102  }
103  }
104 }

Here is the call graph for this function:

rocksdb::TEST ( Coding  ,
EncodingOutput   
)

Definition at line 60 of file coding_test.cc.

References ASSERT_EQ, PutFixed32(), and PutFixed64().

60  {
61  std::string dst;
62  PutFixed32(&dst, 0x04030201);
63  ASSERT_EQ(4U, dst.size());
64  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
65  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
66  ASSERT_EQ(0x03, static_cast<int>(dst[2]));
67  ASSERT_EQ(0x04, static_cast<int>(dst[3]));
68 
69  dst.clear();
70  PutFixed64(&dst, 0x0807060504030201ull);
71  ASSERT_EQ(8U, dst.size());
72  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
73  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
74  ASSERT_EQ(0x03, static_cast<int>(dst[2]));
75  ASSERT_EQ(0x04, static_cast<int>(dst[3]));
76  ASSERT_EQ(0x05, static_cast<int>(dst[4]));
77  ASSERT_EQ(0x06, static_cast<int>(dst[5]));
78  ASSERT_EQ(0x07, static_cast<int>(dst[6]));
79  ASSERT_EQ(0x08, static_cast<int>(dst[7]));
80 }

Here is the call graph for this function:

rocksdb::TEST ( FindFileTest  ,
Single   
)

Definition at line 64 of file version_set_test.cc.

References ASSERT_EQ, and ASSERT_TRUE.

64  {
65  Add("p", "q");
66  ASSERT_EQ(0, Find("a"));
67  ASSERT_EQ(0, Find("p"));
68  ASSERT_EQ(0, Find("p1"));
69  ASSERT_EQ(0, Find("q"));
70  ASSERT_EQ(1, Find("q1"));
71  ASSERT_EQ(1, Find("z"));
72 
73  ASSERT_TRUE(! Overlaps("a", "b"));
74  ASSERT_TRUE(! Overlaps("z1", "z2"));
75  ASSERT_TRUE(Overlaps("a", "p"));
76  ASSERT_TRUE(Overlaps("a", "q"));
77  ASSERT_TRUE(Overlaps("a", "z"));
78  ASSERT_TRUE(Overlaps("p", "p1"));
79  ASSERT_TRUE(Overlaps("p", "q"));
80  ASSERT_TRUE(Overlaps("p", "z"));
81  ASSERT_TRUE(Overlaps("p1", "p2"));
82  ASSERT_TRUE(Overlaps("p1", "z"));
83  ASSERT_TRUE(Overlaps("q", "q"));
84  ASSERT_TRUE(Overlaps("q", "q1"));
85 
86  ASSERT_TRUE(! Overlaps(nullptr, "j"));
87  ASSERT_TRUE(! Overlaps("r", nullptr));
88  ASSERT_TRUE(Overlaps(nullptr, "p"));
89  ASSERT_TRUE(Overlaps(nullptr, "p1"));
90  ASSERT_TRUE(Overlaps("q", nullptr));
91  ASSERT_TRUE(Overlaps(nullptr, nullptr));
92 }
rocksdb::TEST ( FilterBlockTest  ,
SingleChunk   
)

Definition at line 66 of file filter_block_test.cc.

References rocksdb::FilterBlockBuilder::AddKey(), ASSERT_TRUE, rocksdb::FilterBlockBuilder::Finish(), rocksdb::FilterBlockReader::KeyMayMatch(), options_, and rocksdb::FilterBlockBuilder::StartBlock().

66  {
67  FilterBlockBuilder builder(options_);
68  builder.StartBlock(100);
69  builder.AddKey("foo");
70  builder.AddKey("bar");
71  builder.AddKey("box");
72  builder.StartBlock(200);
73  builder.AddKey("box");
74  builder.StartBlock(300);
75  builder.AddKey("hello");
76  Slice block = builder.Finish();
77  FilterBlockReader reader(options_, block);
78  ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
79  ASSERT_TRUE(reader.KeyMayMatch(100, "bar"));
80  ASSERT_TRUE(reader.KeyMayMatch(100, "box"));
81  ASSERT_TRUE(reader.KeyMayMatch(100, "hello"));
82  ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
83  ASSERT_TRUE(! reader.KeyMayMatch(100, "missing"));
84  ASSERT_TRUE(! reader.KeyMayMatch(100, "other"));
85 }

Here is the call graph for this function:

rocksdb::TEST ( FormatTest  ,
InternalKeyShortSeparator   
)

Definition at line 70 of file dbformat_test.cc.

References ASSERT_EQ, IKey(), kMaxSequenceNumber, kTypeDeletion, kTypeValue, kValueTypeForSeek, and Shorten().

70  {
71  // When user keys are same
72  ASSERT_EQ(IKey("foo", 100, kTypeValue),
73  Shorten(IKey("foo", 100, kTypeValue),
74  IKey("foo", 99, kTypeValue)));
75  ASSERT_EQ(IKey("foo", 100, kTypeValue),
76  Shorten(IKey("foo", 100, kTypeValue),
77  IKey("foo", 101, kTypeValue)));
78  ASSERT_EQ(IKey("foo", 100, kTypeValue),
79  Shorten(IKey("foo", 100, kTypeValue),
80  IKey("foo", 100, kTypeValue)));
81  ASSERT_EQ(IKey("foo", 100, kTypeValue),
82  Shorten(IKey("foo", 100, kTypeValue),
83  IKey("foo", 100, kTypeDeletion)));
84 
85  // When user keys are misordered
86  ASSERT_EQ(IKey("foo", 100, kTypeValue),
87  Shorten(IKey("foo", 100, kTypeValue),
88  IKey("bar", 99, kTypeValue)));
89 
90  // When user keys are different, but correctly ordered
91  ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
92  Shorten(IKey("foo", 100, kTypeValue),
93  IKey("hello", 200, kTypeValue)));
94 
95  // When start user key is prefix of limit user key
96  ASSERT_EQ(IKey("foo", 100, kTypeValue),
97  Shorten(IKey("foo", 100, kTypeValue),
98  IKey("foobar", 200, kTypeValue)));
99 
100  // When limit user key is prefix of start user key
101  ASSERT_EQ(IKey("foobar", 100, kTypeValue),
102  Shorten(IKey("foobar", 100, kTypeValue),
103  IKey("foo", 200, kTypeValue)));
104 }

Here is the call graph for this function:

rocksdb::TEST ( WriteBatchTest  ,
Empty   
)

Definition at line 78 of file write_batch_test.cc.

References ASSERT_EQ, rocksdb::WriteBatchInternal::Count(), rocksdb::WriteBatch::Count(), and PrintContents().

78  {
79  WriteBatch batch;
80  ASSERT_EQ("", PrintContents(&batch));
81  ASSERT_EQ(0, WriteBatchInternal::Count(&batch));
82  ASSERT_EQ(0, batch.Count());
83 }

Here is the call graph for this function:

rocksdb::TEST ( Coding  ,
Varint32   
)

Definition at line 82 of file coding_test.cc.

References ASSERT_EQ, ASSERT_TRUE, GetVarint32Ptr(), PutVarint32(), and VarintLength().

82  {
83  std::string s;
84  for (uint32_t i = 0; i < (32 * 32); i++) {
85  uint32_t v = (i / 32) << (i % 32);
86  PutVarint32(&s, v);
87  }
88 
89  const char* p = s.data();
90  const char* limit = p + s.size();
91  for (uint32_t i = 0; i < (32 * 32); i++) {
92  uint32_t expected = (i / 32) << (i % 32);
93  uint32_t actual;
94  const char* start = p;
95  p = GetVarint32Ptr(p, limit, &actual);
96  ASSERT_TRUE(p != nullptr);
97  ASSERT_EQ(expected, actual);
98  ASSERT_EQ(VarintLength(actual), p - start);
99  }
100  ASSERT_EQ(p, s.data() + s.size());
101 }

Here is the call graph for this function:

rocksdb::TEST ( RedisListsTest  ,
SimpleTest2   
)

Definition at line 82 of file redis_lists_test.cc.

References ASSERT_EQ, ASSERT_TRUE, AssertListEq(), rocksdb::RedisLists::Index(), rocksdb::RedisLists::Length(), rocksdb::RedisLists::PushLeft(), and rocksdb::RedisLists::Range().

82  {
83  RedisLists redis(kDefaultDbName, options, true); // Destructive
84 
85  string tempv; // Used below for all Index(), PopRight(), PopLeft()
86 
87  // Simple PushLeft (prepending, so the final list order is v1, v2, v3)
88  ASSERT_EQ(redis.PushLeft("k1", "v3"), 1);
89  ASSERT_EQ(redis.PushLeft("k1", "v2"), 2);
90  ASSERT_EQ(redis.PushLeft("k1", "v1"), 3);
91 
92  // Check Length and Index() functions
93  ASSERT_EQ(redis.Length("k1"), 3); // Check length
94  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
95  ASSERT_EQ(tempv, "v1"); // Check valid indices
96  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
97  ASSERT_EQ(tempv, "v2");
98  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
99  ASSERT_EQ(tempv, "v3");
100 
101  // Check range function and vectors
102  std::vector<std::string> result = redis.Range("k1", 0, 2); // Get the list
103  std::vector<std::string> expected_result(3);
104  expected_result[0] = "v1";
105  expected_result[1] = "v2";
106  expected_result[2] = "v3";
107  AssertListEq(result, expected_result);
108 }

Here is the call graph for this function:

rocksdb::TEST ( WriteBatchTest  ,
Multiple   
)

Definition at line 85 of file write_batch_test.cc.

References ASSERT_EQ, rocksdb::WriteBatchInternal::Count(), rocksdb::WriteBatch::Count(), rocksdb::WriteBatch::Delete(), PrintContents(), rocksdb::WriteBatch::Put(), rocksdb::WriteBatchInternal::Sequence(), and rocksdb::WriteBatchInternal::SetSequence().

85  {
86  WriteBatch batch;
87  batch.Put(Slice("foo"), Slice("bar"));
88  batch.Delete(Slice("box"));
89  batch.Put(Slice("baz"), Slice("boo"));
90  WriteBatchInternal::SetSequence(&batch, 100);
91  ASSERT_EQ(100U, WriteBatchInternal::Sequence(&batch));
92  ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
93  ASSERT_EQ("Put(baz, boo)@102"
94  "Delete(box)@101"
95  "Put(foo, bar)@100",
96  PrintContents(&batch));
97  ASSERT_EQ(3, batch.Count());
98 }

Here is the call graph for this function:

rocksdb::TEST ( FilterBlockTest  ,
MultiChunk   
)

Definition at line 87 of file filter_block_test.cc.

References rocksdb::FilterBlockBuilder::AddKey(), ASSERT_TRUE, rocksdb::FilterBlockBuilder::Finish(), rocksdb::FilterBlockReader::KeyMayMatch(), options_, and rocksdb::FilterBlockBuilder::StartBlock().

87  {
88  FilterBlockBuilder builder(options_);
89 
90  // First filter
91  builder.StartBlock(0);
92  builder.AddKey("foo");
93  builder.StartBlock(2000);
94  builder.AddKey("bar");
95 
96  // Second filter
97  builder.StartBlock(3100);
98  builder.AddKey("box");
99 
100  // Third filter is empty
101 
102  // Last filter
103  builder.StartBlock(9000);
104  builder.AddKey("box");
105  builder.AddKey("hello");
106 
107  Slice block = builder.Finish();
108  FilterBlockReader reader(options_, block);
109 
110  // Check first filter
111  ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
112  ASSERT_TRUE(reader.KeyMayMatch(2000, "bar"));
113  ASSERT_TRUE(! reader.KeyMayMatch(0, "box"));
114  ASSERT_TRUE(! reader.KeyMayMatch(0, "hello"));
115 
116  // Check second filter
117  ASSERT_TRUE(reader.KeyMayMatch(3100, "box"));
118  ASSERT_TRUE(! reader.KeyMayMatch(3100, "foo"));
119  ASSERT_TRUE(! reader.KeyMayMatch(3100, "bar"));
120  ASSERT_TRUE(! reader.KeyMayMatch(3100, "hello"));
121 
122  // Check third filter (empty)
123  ASSERT_TRUE(! reader.KeyMayMatch(4100, "foo"));
124  ASSERT_TRUE(! reader.KeyMayMatch(4100, "bar"));
125  ASSERT_TRUE(! reader.KeyMayMatch(4100, "box"));
126  ASSERT_TRUE(! reader.KeyMayMatch(4100, "hello"));
127 
128  // Check last filter
129  ASSERT_TRUE(reader.KeyMayMatch(9000, "box"));
130  ASSERT_TRUE(reader.KeyMayMatch(9000, "hello"));
131  ASSERT_TRUE(! reader.KeyMayMatch(9000, "foo"));
132  ASSERT_TRUE(! reader.KeyMayMatch(9000, "bar"));
133 }

Here is the call graph for this function:

rocksdb::TEST ( FileNameTest  ,
Construction   
)

Definition at line 88 of file filename_test.cc.

References ASSERT_EQ, ASSERT_TRUE, CurrentFileName(), DescriptorFileName(), kCurrentFile, kDBLockFile, kDescriptorFile, kLogFile, kMetaDatabase, kTableFile, kTempFile, LockFileName(), LogFileName(), MetaDatabaseName(), ParseFileName(), TableFileName(), and TempFileName().

88  {
89  uint64_t number;
90  FileType type;
91  std::string fname;
92 
93  fname = CurrentFileName("foo");
94  ASSERT_EQ("foo/", std::string(fname.data(), 4));
95  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
96  ASSERT_EQ(0U, number);
97  ASSERT_EQ(kCurrentFile, type);
98 
99  fname = LockFileName("foo");
100  ASSERT_EQ("foo/", std::string(fname.data(), 4));
101  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
102  ASSERT_EQ(0U, number);
103  ASSERT_EQ(kDBLockFile, type);
104 
105  fname = LogFileName("foo", 192);
106  ASSERT_EQ("foo/", std::string(fname.data(), 4));
107  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
108  ASSERT_EQ(192U, number);
109  ASSERT_EQ(kLogFile, type);
110 
111  fname = TableFileName("bar", 200);
112  ASSERT_EQ("bar/", std::string(fname.data(), 4));
113  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
114  ASSERT_EQ(200U, number);
115  ASSERT_EQ(kTableFile, type);
116 
117  fname = DescriptorFileName("bar", 100);
118  ASSERT_EQ("bar/", std::string(fname.data(), 4));
119  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
120  ASSERT_EQ(100U, number);
121  ASSERT_EQ(kDescriptorFile, type);
122 
123  fname = TempFileName("tmp", 999);
124  ASSERT_EQ("tmp/", std::string(fname.data(), 4));
125  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
126  ASSERT_EQ(999U, number);
127  ASSERT_EQ(kTempFile, type);
128 
129  fname = MetaDatabaseName("met", 100);
130  ASSERT_EQ("met/", std::string(fname.data(), 4));
131  ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
132  ASSERT_EQ(100U, number);
133  ASSERT_EQ(kMetaDatabase, type);
134 }

Here is the call graph for this function:

rocksdb::TEST ( EnvPosixTest  ,
StartThread   
)

Definition at line 91 of file env_test.cc.

References ASSERT_EQ, rocksdb::Env::Default(), env_, kDelayMicros, rocksdb::port::Mutex::Lock(), rocksdb::State::mu, rocksdb::State::num_running, rocksdb::Env::SleepForMicroseconds(), rocksdb::Env::StartThread(), ThreadBody(), rocksdb::port::Mutex::Unlock(), and rocksdb::State::val.

91  {
92  State state;
93  state.val = 0;
94  state.num_running = 3;
95  for (int i = 0; i < 3; i++) {
96  env_->StartThread(&ThreadBody, &state);
97  }
98  while (true) {
99  state.mu.Lock();
100  int num = state.num_running;
101  state.mu.Unlock();
102  if (num == 0) {
103  break;
104  }
105  Env::Default()->SleepForMicroseconds(kDelayMicros);
106  }
107  ASSERT_EQ(state.val, 3);
108 }

Here is the call graph for this function:

rocksdb::TEST ( MemEnvTest  ,
ReadWrite   
)

Definition at line 91 of file memenv_test.cc.

References rocksdb::WritableFile::Append(), ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, rocksdb::Slice::compare(), rocksdb::Env::CreateDir(), env_, rocksdb::Env::NewRandomAccessFile(), rocksdb::Env::NewSequentialFile(), rocksdb::Env::NewWritableFile(), rocksdb::Status::ok(), rocksdb::SequentialFile::Read(), rocksdb::RandomAccessFile::Read(), rocksdb::Slice::size(), rocksdb::SequentialFile::Skip(), and soptions_.

91  {
92  unique_ptr<WritableFile> writable_file;
93  unique_ptr<SequentialFile> seq_file;
94  unique_ptr<RandomAccessFile> rand_file;
95  Slice result;
96  char scratch[100];
97 
98  ASSERT_OK(env_->CreateDir("/dir"));
99 
100  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
101  ASSERT_OK(writable_file->Append("hello "));
102  ASSERT_OK(writable_file->Append("world"));
103  writable_file.reset();
104 
105  // Read sequentially.
106  ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file, soptions_));
107  ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
108  ASSERT_EQ(0, result.compare("hello"));
109  ASSERT_OK(seq_file->Skip(1));
110  ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
111  ASSERT_EQ(0, result.compare("world"));
112  ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
113  ASSERT_EQ(0U, result.size());
114  ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
115  ASSERT_OK(seq_file->Read(1000, &result, scratch));
116  ASSERT_EQ(0U, result.size());
117 
118  // Random reads.
119  ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file, soptions_));
120  ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
121  ASSERT_EQ(0, result.compare("world"));
122  ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
123  ASSERT_EQ(0, result.compare("hello"));
124  ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
125  ASSERT_EQ(0, result.compare("d"));
126 
127  // Too high offset.
128  ASSERT_TRUE(!rand_file->Read(1000, 5, &result, scratch).ok());
129 }

Here is the call graph for this function:

rocksdb::TEST ( BloomTest  ,
EmptyFilter   
)

Definition at line 92 of file bloom_test.cc.

References ASSERT_TRUE.

92  {
93  ASSERT_TRUE(! Matches("hello"));
94  ASSERT_TRUE(! Matches("world"));
95 }
rocksdb::TEST ( BlobStoreTest  ,
FragmentedChunksTest   
)

Definition at line 94 of file blob_store_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Blob::chunks, rocksdb::Env::Default(), rocksdb::BlobStore::Delete(), rocksdb::BlobStore::Put(), rocksdb::test::RandomString(), and rocksdb::test::TmpDir().

94  {
95  const uint64_t block_size = 10;
96  const uint32_t blocks_per_file = 20;
97  Random random(5);
98 
99  BlobStore blob_store(test::TmpDir() + "/blob_store_test",
100  block_size,
101  blocks_per_file,
102  1000,
103  Env::Default());
104 
105  string buf;
106 
107  vector <Blob> r(4);
108 
109  // put 4 strings of size 50
110  for (int k = 0; k < 4; ++k) {
111  test::RandomString(&random, 50, &buf);
112  ASSERT_OK(blob_store.Put(Slice(buf), &r[k]));
113  // use the first file
114  for (size_t i = 0; i < r[k].chunks.size(); ++i) {
115  ASSERT_EQ(r[k].chunks[0].bucket_id, 0u);
116  }
117  }
118 
119  // delete the first and third
120  ASSERT_OK(blob_store.Delete(r[0]));
121  ASSERT_OK(blob_store.Delete(r[2]));
122 
123  // put a string of size 100. it should reuse the space that we freed
124  // by deleting the first and third strings of size 50
125  test::RandomString(&random, 100, &buf);
126  Blob r2;
127  ASSERT_OK(blob_store.Put(Slice(buf), &r2));
128  // use the first file
129  for (size_t i = 0; i < r2.chunks.size(); ++i) {
130  ASSERT_EQ(r2.chunks[0].bucket_id, 0u);
131  }
132 }

Here is the call graph for this function:

rocksdb::TEST ( FindFileTest  ,
Multiple   
)

Definition at line 95 of file version_set_test.cc.

References ASSERT_EQ, and ASSERT_TRUE.

95  {
96  Add("150", "200");
97  Add("200", "250");
98  Add("300", "350");
99  Add("400", "450");
100  ASSERT_EQ(0, Find("100"));
101  ASSERT_EQ(0, Find("150"));
102  ASSERT_EQ(0, Find("151"));
103  ASSERT_EQ(0, Find("199"));
104  ASSERT_EQ(0, Find("200"));
105  ASSERT_EQ(1, Find("201"));
106  ASSERT_EQ(1, Find("249"));
107  ASSERT_EQ(1, Find("250"));
108  ASSERT_EQ(2, Find("251"));
109  ASSERT_EQ(2, Find("299"));
110  ASSERT_EQ(2, Find("300"));
111  ASSERT_EQ(2, Find("349"));
112  ASSERT_EQ(2, Find("350"));
113  ASSERT_EQ(3, Find("351"));
114  ASSERT_EQ(3, Find("400"));
115  ASSERT_EQ(3, Find("450"));
116  ASSERT_EQ(4, Find("451"));
117 
118  ASSERT_TRUE(! Overlaps("100", "149"));
119  ASSERT_TRUE(! Overlaps("251", "299"));
120  ASSERT_TRUE(! Overlaps("451", "500"));
121  ASSERT_TRUE(! Overlaps("351", "399"));
122 
123  ASSERT_TRUE(Overlaps("100", "150"));
124  ASSERT_TRUE(Overlaps("100", "200"));
125  ASSERT_TRUE(Overlaps("100", "300"));
126  ASSERT_TRUE(Overlaps("100", "400"));
127  ASSERT_TRUE(Overlaps("100", "500"));
128  ASSERT_TRUE(Overlaps("375", "400"));
129  ASSERT_TRUE(Overlaps("450", "450"));
130  ASSERT_TRUE(Overlaps("450", "500"));
131 }
rocksdb::TEST ( ReduceLevelTest  ,
Last_Level   
)

Definition at line 96 of file reduce_levels_test.cc.

References ASSERT_EQ, ASSERT_OK, and ASSERT_TRUE.

96  {
97  // create files on all levels;
98  ASSERT_OK(OpenDB(true, 4, 3));
99  ASSERT_OK(Put("aaaa", "11111"));
100  ASSERT_OK(CompactMemTable());
101  ASSERT_EQ(FilesOnLevel(3), 1);
102  CloseDB();
103 
104  ASSERT_TRUE(ReduceLevels(3));
105  ASSERT_OK(OpenDB(true, 3, 1));
106  ASSERT_EQ(FilesOnLevel(2), 1);
107  CloseDB();
108 
109  ASSERT_TRUE(ReduceLevels(2));
110  ASSERT_OK(OpenDB(true, 2, 1));
111  ASSERT_EQ(FilesOnLevel(1), 1);
112  CloseDB();
113 }
rocksdb::TEST ( BloomTest  ,
Small   
)

Definition at line 97 of file bloom_test.cc.

References ASSERT_TRUE.

97  {
98  Add("hello");
99  Add("world");
100  ASSERT_TRUE(Matches("hello"));
101  ASSERT_TRUE(Matches("world"));
102  ASSERT_TRUE(! Matches("x"));
103  ASSERT_TRUE(! Matches("foo"));
104 }
rocksdb::TEST ( WriteBatchTest  ,
Corruption   
)

Definition at line 100 of file write_batch_test.cc.

References ASSERT_EQ, rocksdb::WriteBatchInternal::Contents(), rocksdb::Slice::data(), rocksdb::WriteBatch::Delete(), PrintContents(), rocksdb::WriteBatch::Put(), rocksdb::WriteBatchInternal::SetContents(), rocksdb::WriteBatchInternal::SetSequence(), and rocksdb::Slice::size().

100  {
101  WriteBatch batch;
102  batch.Put(Slice("foo"), Slice("bar"));
103  batch.Delete(Slice("box"));
104  WriteBatchInternal::SetSequence(&batch, 200);
105  Slice contents = WriteBatchInternal::Contents(&batch);
106  WriteBatchInternal::SetContents(&batch,
107  Slice(contents.data(),contents.size()-1));
108  ASSERT_EQ("Put(foo, bar)@200"
109  "Corruption: bad WriteBatch Delete",
110  PrintContents(&batch));
111 }

Here is the call graph for this function:

rocksdb::TEST ( Coding  ,
Varint64   
)

Definition at line 103 of file coding_test.cc.

References ASSERT_EQ, ASSERT_TRUE, GetVarint64Ptr(), PutVarint64(), and VarintLength().

103  {
104  // Construct the list of values to check
105  std::vector<uint64_t> values;
106  // Some special values
107  values.push_back(0);
108  values.push_back(100);
109  values.push_back(~static_cast<uint64_t>(0));
110  values.push_back(~static_cast<uint64_t>(0) - 1);
111  for (uint32_t k = 0; k < 64; k++) {
112  // Test values near powers of two
113  const uint64_t power = 1ull << k;
114  values.push_back(power);
115  values.push_back(power-1);
116  values.push_back(power+1);
117  };
118 
119  std::string s;
120  for (unsigned int i = 0; i < values.size(); i++) {
121  PutVarint64(&s, values[i]);
122  }
123 
124  const char* p = s.data();
125  const char* limit = p + s.size();
126  for (unsigned int i = 0; i < values.size(); i++) {
127  ASSERT_TRUE(p < limit);
128  uint64_t actual;
129  const char* start = p;
130  p = GetVarint64Ptr(p, limit, &actual);
131  ASSERT_TRUE(p != nullptr);
132  ASSERT_EQ(values[i], actual);
133  ASSERT_EQ(VarintLength(actual), p - start);
134  }
135  ASSERT_EQ(p, limit);
136 
137 }

Here is the call graph for this function:

rocksdb::TEST ( FormatTest  ,
InternalKeyShortestSuccessor   
)

Definition at line 106 of file dbformat_test.cc.

References ASSERT_EQ, IKey(), kMaxSequenceNumber, kTypeValue, kValueTypeForSeek, and ShortSuccessor().

106  {
107  ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
108  ShortSuccessor(IKey("foo", 100, kTypeValue)));
109  ASSERT_EQ(IKey("\xff\xff", 100, kTypeValue),
110  ShortSuccessor(IKey("\xff\xff", 100, kTypeValue)));
111 }

Here is the call graph for this function:

rocksdb::TEST ( CacheTest  ,
HitAndMiss   
)

Definition at line 110 of file cache_test.cc.

References ASSERT_EQ.

110  {
111  ASSERT_EQ(-1, Lookup(100));
112 
113  Insert(100, 101);
114  ASSERT_EQ(101, Lookup(100));
115  ASSERT_EQ(-1, Lookup(200));
116  ASSERT_EQ(-1, Lookup(300));
117 
118  Insert(200, 201);
119  ASSERT_EQ(101, Lookup(100));
120  ASSERT_EQ(201, Lookup(200));
121  ASSERT_EQ(-1, Lookup(300));
122 
123  Insert(100, 102);
124  ASSERT_EQ(102, Lookup(100));
125  ASSERT_EQ(201, Lookup(200));
126  ASSERT_EQ(-1, Lookup(300));
127 
128  ASSERT_EQ(1U, deleted_keys_.size());
129  ASSERT_EQ(100, deleted_keys_[0]);
130  ASSERT_EQ(101, deleted_values_[0]);
131 }
rocksdb::TEST ( EnvPosixTest  ,
TwoPools   
)

Definition at line 110 of file env_test.cc.

References ASSERT_LE, rocksdb::Env::Default(), env_, kDelayMicros, mu_, rocksdb::Env::Schedule(), rocksdb::Env::SetBackgroundThreads(), and rocksdb::Env::SleepForMicroseconds().

110  {
111 
112  class CB {
113  public:
114  CB(const std::string& pool_name, int pool_size)
115  : mu_(),
116  num_running_(0),
117  num_finished_(0),
118  pool_size_(pool_size),
119  pool_name_(pool_name) { }
120 
121  static void Run(void* v) {
122  CB* cb = reinterpret_cast<CB*>(v);
123  cb->Run();
124  }
125 
126  void Run() {
127  {
128  MutexLock l(&mu_);
129  num_running_++;
130  std::cout << "Pool " << pool_name_ << ": "
131  << num_running_ << " running threads.\n";
132  // make sure we don't have more than pool_size_ jobs running.
133  ASSERT_LE(num_running_, pool_size_);
134  }
135 
136  // sleep for 1 sec
137  Env::Default()->SleepForMicroseconds(1000000);
138 
139  {
140  MutexLock l(&mu_);
141  num_running_--;
142  num_finished_++;
143  }
144  }
145 
146  int NumFinished() {
147  MutexLock l(&mu_);
148  return num_finished_;
149  }
150 
151  private:
152  port::Mutex mu_;
153  int num_running_;
154  int num_finished_;
155  int pool_size_;
156  std::string pool_name_;
157  };
158 
159  const int kLowPoolSize = 2;
160  const int kHighPoolSize = 4;
161  const int kJobs = 8;
162 
163  CB low_pool_job("low", kLowPoolSize);
164  CB high_pool_job("high", kHighPoolSize);
165 
166  env_->SetBackgroundThreads(kLowPoolSize);
167  env_->SetBackgroundThreads(kHighPoolSize, Env::Priority::HIGH);
168 
169  // schedule same number of jobs in each pool
170  for (int i = 0; i < kJobs; i++) {
171  env_->Schedule(&CB::Run, &low_pool_job);
172  env_->Schedule(&CB::Run, &high_pool_job, Env::Priority::HIGH);
173  }
174 
175  // wait for all jobs to finish
176  while (low_pool_job.NumFinished() < kJobs ||
177  high_pool_job.NumFinished() < kJobs) {
178  env_->SleepForMicroseconds(kDelayMicros);
179  }
180 }

Here is the call graph for this function:

rocksdb::TEST ( RedisListsTest  ,
IndexTest   
)

Definition at line 111 of file redis_lists_test.cc.

References ASSERT_EQ, ASSERT_TRUE, rocksdb::RedisLists::Index(), rocksdb::RedisLists::PushLeft(), and rocksdb::RedisLists::PushRight().

111  {
112  RedisLists redis(kDefaultDbName, options, true); // Destructive
113 
114  string tempv; // Used below for all Index(), PopRight(), PopLeft()
115 
116  // Empty Index check (return empty and should not crash or edit tempv)
117  tempv = "yo";
118  ASSERT_TRUE(!redis.Index("k1", 0, &tempv));
119  ASSERT_EQ(tempv, "yo");
120  ASSERT_TRUE(!redis.Index("fda", 3, &tempv));
121  ASSERT_EQ(tempv, "yo");
122  ASSERT_TRUE(!redis.Index("random", -12391, &tempv));
123  ASSERT_EQ(tempv, "yo");
124 
125  // Simple Pushes (will yield: [v6, v4, v4, v1, v2, v3])
126  redis.PushRight("k1", "v1");
127  redis.PushRight("k1", "v2");
128  redis.PushRight("k1", "v3");
129  redis.PushLeft("k1", "v4");
130  redis.PushLeft("k1", "v4");
131  redis.PushLeft("k1", "v6");
132 
133  // Simple, non-negative indices
134  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
135  ASSERT_EQ(tempv, "v6");
136  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
137  ASSERT_EQ(tempv, "v4");
138  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
139  ASSERT_EQ(tempv, "v4");
140  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
141  ASSERT_EQ(tempv, "v1");
142  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
143  ASSERT_EQ(tempv, "v2");
144  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
145  ASSERT_EQ(tempv, "v3");
146 
147  // Negative indices
148  ASSERT_TRUE(redis.Index("k1", -6, &tempv));
149  ASSERT_EQ(tempv, "v6");
150  ASSERT_TRUE(redis.Index("k1", -5, &tempv));
151  ASSERT_EQ(tempv, "v4");
152  ASSERT_TRUE(redis.Index("k1", -4, &tempv));
153  ASSERT_EQ(tempv, "v4");
154  ASSERT_TRUE(redis.Index("k1", -3, &tempv));
155  ASSERT_EQ(tempv, "v1");
156  ASSERT_TRUE(redis.Index("k1", -2, &tempv));
157  ASSERT_EQ(tempv, "v2");
158  ASSERT_TRUE(redis.Index("k1", -1, &tempv));
159  ASSERT_EQ(tempv, "v3");
160 
161  // Out of bounds (return empty, no crash)
162  ASSERT_TRUE(!redis.Index("k1", 6, &tempv));
163  ASSERT_TRUE(!redis.Index("k1", 123219, &tempv));
164  ASSERT_TRUE(!redis.Index("k1", -7, &tempv));
165  ASSERT_TRUE(!redis.Index("k1", -129, &tempv));
166 }

Here is the call graph for this function:

rocksdb::TEST ( WriteBatchTest  ,
Append   
)

Definition at line 113 of file write_batch_test.cc.

References rocksdb::WriteBatchInternal::Append(), ASSERT_EQ, rocksdb::WriteBatch::Clear(), rocksdb::WriteBatch::Count(), rocksdb::WriteBatch::Delete(), PrintContents(), rocksdb::WriteBatch::Put(), and rocksdb::WriteBatchInternal::SetSequence().

113  {
114  WriteBatch b1, b2;
115  WriteBatchInternal::SetSequence(&b1, 200);
116  WriteBatchInternal::SetSequence(&b2, 300);
117  WriteBatchInternal::Append(&b1, &b2);
118  ASSERT_EQ("",
119  PrintContents(&b1));
120  ASSERT_EQ(0, b1.Count());
121  b2.Put("a", "va");
122  WriteBatchInternal::Append(&b1, &b2);
123  ASSERT_EQ("Put(a, va)@200",
124  PrintContents(&b1));
125  ASSERT_EQ(1, b1.Count());
126  b2.Clear();
127  b2.Put("b", "vb");
128  WriteBatchInternal::Append(&b1, &b2);
129  ASSERT_EQ("Put(a, va)@200"
130  "Put(b, vb)@201",
131  PrintContents(&b1));
132  ASSERT_EQ(2, b1.Count());
133  b2.Delete("foo");
134  WriteBatchInternal::Append(&b1, &b2);
135  ASSERT_EQ("Put(a, va)@200"
136  "Put(b, vb)@202"
137  "Put(b, vb)@201"
138  "Delete(foo)@203",
139  PrintContents(&b1));
140  ASSERT_EQ(4, b1.Count());
141 }

Here is the call graph for this function:

rocksdb::TEST ( ReduceLevelTest  ,
Top_Level   
)

Definition at line 115 of file reduce_levels_test.cc.

References ASSERT_EQ, ASSERT_OK, and ASSERT_TRUE.

115  {
116  // create files on all levels;
117  ASSERT_OK(OpenDB(true, 5, 0));
118  ASSERT_OK(Put("aaaa", "11111"));
119  ASSERT_OK(CompactMemTable());
120  ASSERT_EQ(FilesOnLevel(0), 1);
121  CloseDB();
122 
123  ASSERT_TRUE(ReduceLevels(4));
124  ASSERT_OK(OpenDB(true, 4, 0));
125  CloseDB();
126 
127  ASSERT_TRUE(ReduceLevels(3));
128  ASSERT_OK(OpenDB(true, 3, 0));
129  CloseDB();
130 
131  ASSERT_TRUE(ReduceLevels(2));
132  ASSERT_OK(OpenDB(true, 2, 0));
133  CloseDB();
134 }
rocksdb::TEST ( BloomTest  ,
VaryingLengths   
)

Definition at line 119 of file bloom_test.cc.

References ASSERT_LE, ASSERT_TRUE, kVerbose, and NextLength().

119  {
120  char buffer[sizeof(int)];
121 
122  // Count number of filters that significantly exceed the false positive rate
123  int mediocre_filters = 0;
124  int good_filters = 0;
125 
126  for (int length = 1; length <= 10000; length = NextLength(length)) {
127  Reset();
128  for (int i = 0; i < length; i++) {
129  Add(Key(i, buffer));
130  }
131  Build();
132 
133  ASSERT_LE(FilterSize(), (size_t)((length * 10 / 8) + 40)) << length;
134 
135  // All added keys must match
136  for (int i = 0; i < length; i++) {
137  ASSERT_TRUE(Matches(Key(i, buffer)))
138  << "Length " << length << "; key " << i;
139  }
140 
141  // Check false positive rate
142  double rate = FalsePositiveRate();
143  if (kVerbose >= 1) {
144  fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
145  rate*100.0, length, static_cast<int>(FilterSize()));
146  }
147  ASSERT_LE(rate, 0.02); // Must not be over 2%
148  if (rate > 0.0125) mediocre_filters++; // Allowed, but not too often
149  else good_filters++;
150  }
151  if (kVerbose >= 1) {
152  fprintf(stderr, "Filters: %d good, %d mediocre\n",
153  good_filters, mediocre_filters);
154  }
155  ASSERT_LE(mediocre_filters, good_filters/5);
156 }

Here is the call graph for this function:

rocksdb::TEST ( AutoRollLoggerTest  ,
RollLogFileBySize   
)

Definition at line 126 of file auto_roll_logger_test.cc.

References logger.

126  {
127  InitTestDb();
128  size_t log_max_size = 1024 * 5;
129 
130  AutoRollLogger logger(Env::Default(), kTestDir, "", log_max_size, 0);
131 
132  RollLogFileBySizeTest(&logger, log_max_size,
133  kSampleMessage + ":RollLogFileBySize");
134 
135 }
rocksdb::TEST ( StringAppendOperatorTest  ,
IteratorTest   
)

Definition at line 129 of file stringappend_test.cc.

References rocksdb::StringLists::Append(), ASSERT_EQ, db_, beast.util.Iter::first(), rocksdb::DBImpl::NewIterator(), and OpenDb().

129  {
130  auto db_ = OpenDb(',');
131  StringLists slists(db_);
132 
133  slists.Append("k1", "v1");
134  slists.Append("k1", "v2");
135  slists.Append("k1", "v3");
136 
137  slists.Append("k2", "a1");
138  slists.Append("k2", "a2");
139  slists.Append("k2", "a3");
140 
141  std::string res;
142  std::unique_ptr<rocksdb::Iterator> it(db_->NewIterator(ReadOptions()));
143  std::string k1("k1");
144  std::string k2("k2");
145  bool first = true;
146  for (it->Seek(k1); it->Valid(); it->Next()) {
147  res = it->value().ToString();
148  if (first) {
149  ASSERT_EQ(res, "v1,v2,v3");
150  first = false;
151  } else {
152  ASSERT_EQ(res, "a1,a2,a3");
153  }
154  }
155  slists.Append("k2", "a4");
156  slists.Append("k1", "v4");
157 
158  // Snapshot should still be the same. Should ignore a4 and v4.
159  first = true;
160  for (it->Seek(k1); it->Valid(); it->Next()) {
161  res = it->value().ToString();
162  if (first) {
163  ASSERT_EQ(res, "v1,v2,v3");
164  first = false;
165  } else {
166  ASSERT_EQ(res, "a1,a2,a3");
167  }
168  }
169 
170 
171  // Should release the snapshot and be aware of the new stuff now
172  it.reset(db_->NewIterator(ReadOptions()));
173  first = true;
174  for (it->Seek(k1); it->Valid(); it->Next()) {
175  res = it->value().ToString();
176  if (first) {
177  ASSERT_EQ(res, "v1,v2,v3,v4");
178  first = false;
179  } else {
180  ASSERT_EQ(res, "a1,a2,a3,a4");
181  }
182  }
183 
184  // start from k2 this time.
185  for (it->Seek(k2); it->Valid(); it->Next()) {
186  res = it->value().ToString();
187  if (first) {
188  ASSERT_EQ(res, "v1,v2,v3,v4");
189  first = false;
190  } else {
191  ASSERT_EQ(res, "a1,a2,a3,a4");
192  }
193  }
194 
195  slists.Append("k3", "g1");
196 
197  it.reset(db_->NewIterator(ReadOptions()));
198  first = true;
199  std::string k3("k3");
200  for(it->Seek(k2); it->Valid(); it->Next()) {
201  res = it->value().ToString();
202  if (first) {
203  ASSERT_EQ(res, "a1,a2,a3,a4");
204  first = false;
205  } else {
206  ASSERT_EQ(res, "g1");
207  }
208  }
209  for(it->Seek(k3); it->Valid(); it->Next()) {
210  res = it->value().ToString();
211  if (first) {
212  // should not be hit
213  ASSERT_EQ(res, "a1,a2,a3,a4");
214  first = false;
215  } else {
216  ASSERT_EQ(res, "g1");
217  }
218  }
219 
220 }

Here is the call graph for this function:

rocksdb::TEST ( PerfContextTest  ,
StopWatchNanoOverhead   
)

Definition at line 129 of file perf_context_test.cc.

References rocksdb::HistogramImpl::Add(), rocksdb::Env::Default(), rocksdb::StopWatchNano::ElapsedNanos(), and rocksdb::HistogramImpl::ToString().

129  {
130  // profile the timer cost by itself!
131  const int kTotalIterations = 1000000;
132  std::vector<uint64_t> timings(kTotalIterations);
133 
134  StopWatchNano timer(Env::Default(), true);
135  for (auto& timing : timings) {
136  timing = timer.ElapsedNanos(true /* reset */);
137  }
138 
139  HistogramImpl histogram;
140  for (const auto timing : timings) {
141  histogram.Add(timing);
142  }
143 
144  std::cout << histogram.ToString();
145 }

Here is the call graph for this function:

rocksdb::TEST ( MemEnvTest  ,
Locks   
)

Definition at line 131 of file memenv_test.cc.

References ASSERT_OK, env_, rocksdb::Env::LockFile(), and rocksdb::Env::UnlockFile().

131  {
132  FileLock* lock;
133 
134  // These are no-ops, but we test they return success.
135  ASSERT_OK(env_->LockFile("some file", &lock));
136  ASSERT_OK(env_->UnlockFile(lock));
137 }

Here is the call graph for this function:

rocksdb::TEST ( CacheTest  ,
Erase   
)

Definition at line 133 of file cache_test.cc.

References ASSERT_EQ.

133  {
134  Erase(200);
135  ASSERT_EQ(0U, deleted_keys_.size());
136 
137  Insert(100, 101);
138  Insert(200, 201);
139  Erase(100);
140  ASSERT_EQ(-1, Lookup(100));
141  ASSERT_EQ(201, Lookup(200));
142  ASSERT_EQ(1U, deleted_keys_.size());
143  ASSERT_EQ(100, deleted_keys_[0]);
144  ASSERT_EQ(101, deleted_values_[0]);
145 
146  Erase(100);
147  ASSERT_EQ(-1, Lookup(100));
148  ASSERT_EQ(201, Lookup(200));
149  ASSERT_EQ(1U, deleted_keys_.size());
150 }
rocksdb::TEST ( PrefixTest  ,
DynamicPrefixIterator   
)

Definition at line 133 of file prefix_test.cc.

References rocksdb::HistogramImpl::Add(), ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, db, rocksdb::Env::Default(), rocksdb::DBImpl::Delete(), DestroyDB(), rocksdb::StopWatchNano::ElapsedNanos(), kDbName, rocksdb::DBImpl::NewIterator(), OpenDb(), perf_context, rocksdb::TestKey::prefix, std::chrono::prefix, rocksdb::ReadOptions::prefix_seek, rocksdb::DBImpl::Put(), rocksdb::PerfContext::Reset(), SliceToTestKey(), TestKeyToSlice(), beast::IP::to_string(), rocksdb::HistogramImpl::ToString(), rocksdb::PerfContext::user_key_comparison_count, and value.

133  {
134 
135  DestroyDB(kDbName, Options());
136  auto db = OpenDb();
137  WriteOptions write_options;
138  ReadOptions read_options;
139 
140  std::vector<uint64_t> prefixes;
141  for (uint64_t i = 0; i < FLAGS_total_prefixes; ++i) {
142  prefixes.push_back(i);
143  }
144 
145  if (FLAGS_random_prefix) {
146  std::random_shuffle(prefixes.begin(), prefixes.end());
147  }
148 
149  // insert x random prefix, each with y continuous element.
150  for (auto prefix : prefixes) {
151  for (uint64_t sorted = 0; sorted < FLAGS_items_per_prefix; sorted++) {
152  TestKey test_key(prefix, sorted);
153 
154  Slice key = TestKeyToSlice(test_key);
155  std::string value = "v" + std::to_string(sorted);
156 
157  ASSERT_OK(db->Put(write_options, key, value));
158  }
159  }
160 
161  // test seek existing keys
162  HistogramImpl hist_seek_time;
163  HistogramImpl hist_seek_comparison;
164 
165  if (FLAGS_use_prefix_hash_memtable) {
166  read_options.prefix_seek = true;
167  }
168  std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
169 
170  for (auto prefix : prefixes) {
171  TestKey test_key(prefix, FLAGS_items_per_prefix / 2);
172  Slice key = TestKeyToSlice(test_key);
173  std::string value = "v" + std::to_string(0);
174 
176  StopWatchNano timer(Env::Default(), true);
177  uint64_t total_keys = 0;
178  for (iter->Seek(key); iter->Valid(); iter->Next()) {
179  if (FLAGS_trigger_deadlock) {
180  std::cout << "Behold the deadlock!\n";
181  db->Delete(write_options, iter->key());
182  }
183  auto test_key = SliceToTestKey(iter->key());
184  if (test_key->prefix != prefix) break;
185  total_keys++;
186  }
187  hist_seek_time.Add(timer.ElapsedNanos());
188  hist_seek_comparison.Add(perf_context.user_key_comparison_count);
189  ASSERT_EQ(total_keys, FLAGS_items_per_prefix - FLAGS_items_per_prefix/2);
190  }
191 
192  std::cout << "Seek key comparison: \n"
193  << hist_seek_comparison.ToString()
194  << "Seek time: \n"
195  << hist_seek_time.ToString();
196 
197  // test non-existing keys
198  HistogramImpl hist_no_seek_time;
199  HistogramImpl hist_no_seek_comparison;
200 
201  for (auto prefix = FLAGS_total_prefixes;
202  prefix < FLAGS_total_prefixes + 100;
203  prefix++) {
204  TestKey test_key(prefix, 0);
205  Slice key = TestKeyToSlice(test_key);
206 
208  StopWatchNano timer(Env::Default(), true);
209  iter->Seek(key);
210  hist_no_seek_time.Add(timer.ElapsedNanos());
211  hist_no_seek_comparison.Add(perf_context.user_key_comparison_count);
212  ASSERT_TRUE(!iter->Valid());
213  }
214 
215  std::cout << "non-existing Seek key comparison: \n"
216  << hist_no_seek_comparison.ToString()
217  << "non-existing Seek time: \n"
218  << hist_no_seek_time.ToString();
219 }

Here is the call graph for this function:

rocksdb::TEST ( FindFileTest  ,
MultipleNullBoundaries   
)

Definition at line 133 of file version_set_test.cc.

References ASSERT_TRUE.

133  {
134  Add("150", "200");
135  Add("200", "250");
136  Add("300", "350");
137  Add("400", "450");
138  ASSERT_TRUE(! Overlaps(nullptr, "149"));
139  ASSERT_TRUE(! Overlaps("451", nullptr));
140  ASSERT_TRUE(Overlaps(nullptr, nullptr));
141  ASSERT_TRUE(Overlaps(nullptr, "150"));
142  ASSERT_TRUE(Overlaps(nullptr, "199"));
143  ASSERT_TRUE(Overlaps(nullptr, "200"));
144  ASSERT_TRUE(Overlaps(nullptr, "201"));
145  ASSERT_TRUE(Overlaps(nullptr, "400"));
146  ASSERT_TRUE(Overlaps(nullptr, "800"));
147  ASSERT_TRUE(Overlaps("100", nullptr));
148  ASSERT_TRUE(Overlaps("200", nullptr));
149  ASSERT_TRUE(Overlaps("449", nullptr));
150  ASSERT_TRUE(Overlaps("450", nullptr));
151 }
rocksdb::TEST ( BlobStoreTest  ,
CreateAndStoreTest   
)

Definition at line 134 of file blob_store_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Env::Default(), rocksdb::BlobStore::Delete(), rocksdb::BlobStore::Get(), rocksdb::BlobStore::Put(), rand, rocksdb::test::RandomString(), rocksdb::BlobStore::Sync(), and rocksdb::test::TmpDir().

134  {
135  const uint64_t block_size = 10;
136  const uint32_t blocks_per_file = 1000;
137  const int max_blurb_size = 300;
138  Random random(5);
139 
140  BlobStore blob_store(test::TmpDir() + "/blob_store_test",
141  block_size,
142  blocks_per_file,
143  10000,
144  Env::Default());
145  vector<pair<Blob, string>> ranges;
146 
147  for (int i = 0; i < 2000; ++i) {
148  int decision = rand() % 5;
149  if (decision <= 2 || ranges.size() == 0) {
150  string buf;
151  int size_blocks = (rand() % max_blurb_size + 1);
152  int string_size = size_blocks * block_size - (rand() % block_size);
153  test::RandomString(&random, string_size, &buf);
154  Blob r;
155  ASSERT_OK(blob_store.Put(Slice(buf), &r));
156  ranges.push_back(make_pair(r, buf));
157  } else if (decision == 3) {
158  int ti = rand() % ranges.size();
159  string out_buf;
160  ASSERT_OK(blob_store.Get(ranges[ti].first, &out_buf));
161  ASSERT_EQ(ranges[ti].second, out_buf);
162  } else {
163  int ti = rand() % ranges.size();
164  ASSERT_OK(blob_store.Delete(ranges[ti].first));
165  ranges.erase(ranges.begin() + ti);
166  }
167  }
168  ASSERT_OK(blob_store.Sync());
169 }

Here is the call graph for this function:

rocksdb::TEST ( ReduceLevelTest  ,
All_Levels   
)

Definition at line 136 of file reduce_levels_test.cc.

References ASSERT_EQ, ASSERT_OK, and ASSERT_TRUE.

136  {
137  // create files on all levels;
138  ASSERT_OK(OpenDB(true, 5, 1));
139  ASSERT_OK(Put("a", "a11111"));
140  ASSERT_OK(CompactMemTable());
141  ASSERT_EQ(FilesOnLevel(1), 1);
142  CloseDB();
143 
144  ASSERT_OK(OpenDB(true, 5, 2));
145  ASSERT_OK(Put("b", "b11111"));
146  ASSERT_OK(CompactMemTable());
147  ASSERT_EQ(FilesOnLevel(1), 1);
148  ASSERT_EQ(FilesOnLevel(2), 1);
149  CloseDB();
150 
151  ASSERT_OK(OpenDB(true, 5, 3));
152  ASSERT_OK(Put("c", "c11111"));
153  ASSERT_OK(CompactMemTable());
154  ASSERT_EQ(FilesOnLevel(1), 1);
155  ASSERT_EQ(FilesOnLevel(2), 1);
156  ASSERT_EQ(FilesOnLevel(3), 1);
157  CloseDB();
158 
159  ASSERT_OK(OpenDB(true, 5, 4));
160  ASSERT_OK(Put("d", "d11111"));
161  ASSERT_OK(CompactMemTable());
162  ASSERT_EQ(FilesOnLevel(1), 1);
163  ASSERT_EQ(FilesOnLevel(2), 1);
164  ASSERT_EQ(FilesOnLevel(3), 1);
165  ASSERT_EQ(FilesOnLevel(4), 1);
166  CloseDB();
167 
168  ASSERT_TRUE(ReduceLevels(4));
169  ASSERT_OK(OpenDB(true, 4, 0));
170  ASSERT_EQ("a11111", Get("a"));
171  ASSERT_EQ("b11111", Get("b"));
172  ASSERT_EQ("c11111", Get("c"));
173  ASSERT_EQ("d11111", Get("d"));
174  CloseDB();
175 
176  ASSERT_TRUE(ReduceLevels(3));
177  ASSERT_OK(OpenDB(true, 3, 0));
178  ASSERT_EQ("a11111", Get("a"));
179  ASSERT_EQ("b11111", Get("b"));
180  ASSERT_EQ("c11111", Get("c"));
181  ASSERT_EQ("d11111", Get("d"));
182  CloseDB();
183 
184  ASSERT_TRUE(ReduceLevels(2));
185  ASSERT_OK(OpenDB(true, 2, 0));
186  ASSERT_EQ("a11111", Get("a"));
187  ASSERT_EQ("b11111", Get("b"));
188  ASSERT_EQ("c11111", Get("c"));
189  ASSERT_EQ("d11111", Get("d"));
190  CloseDB();
191 }
rocksdb::TEST ( AutoRollLoggerTest  ,
RollLogFileByTime   
)

Definition at line 137 of file auto_roll_logger_test.cc.

References ASSERT_TRUE, env, kLogFile, and logger.

137  {
138  size_t time = 1;
139  size_t log_size = 1024 * 5;
140 
141  InitTestDb();
142  // -- Test the existence of file during the server restart.
143  ASSERT_TRUE(!env->FileExists(kLogFile));
144  AutoRollLogger logger(Env::Default(), kTestDir, "", log_size, 1);
145  ASSERT_TRUE(env->FileExists(kLogFile));
146 
147  RollLogFileByTimeTest(&logger, time, kSampleMessage + ":RollLogFileByTime");
148 }
rocksdb::TEST ( Coding  ,
Varint32Overflow   
)

Definition at line 139 of file coding_test.cc.

References ASSERT_TRUE, and GetVarint32Ptr().

139  {
140  uint32_t result;
141  std::string input("\x81\x82\x83\x84\x85\x11");
142  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result)
143  == nullptr);
144 }

Here is the call graph for this function:

rocksdb::TEST ( MemEnvTest  ,
Misc   
)

Definition at line 139 of file memenv_test.cc.

References ASSERT_OK, ASSERT_TRUE, env_, rocksdb::Env::GetTestDirectory(), rocksdb::Env::NewWritableFile(), and soptions_.

139  {
140  std::string test_dir;
141  ASSERT_OK(env_->GetTestDirectory(&test_dir));
142  ASSERT_TRUE(!test_dir.empty());
143 
144  unique_ptr<WritableFile> writable_file;
145  ASSERT_OK(env_->NewWritableFile("/a/b", &writable_file, soptions_));
146 
147  // These are no-ops, but we test they return success.
148  ASSERT_OK(writable_file->Sync());
149  ASSERT_OK(writable_file->Flush());
150  ASSERT_OK(writable_file->Close());
151  writable_file.reset();
152 }

Here is the call graph for this function:

rocksdb::TEST ( TablePropertiesTest  ,
CustomizedTablePropertiesCollector   
)

Definition at line 145 of file table_properties_collector_test.cc.

References rocksdb::TableBuilder::Add(), ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, rocksdb::FakeWritableFile::contents(), rocksdb::InternalKey::Encode(), rocksdb::TableBuilder::Finish(), GetVarint32(), kTypeValue, MakeBuilder(), OpenTable(), and rocksdb::Options::table_properties_collectors.

145  {
146  Options options;
147 
148  // make sure the entries will be inserted with order.
149  std::map<std::string, std::string> kvs = {
150  {"About", "val5"}, // starts with 'A'
151  {"Abstract", "val2"}, // starts with 'A'
152  {"Around", "val7"}, // starts with 'A'
153  {"Beyond", "val3"},
154  {"Builder", "val1"},
155  {"Cancel", "val4"},
156  {"Find", "val6"},
157  };
158 
159  // Test properties collectors with internal keys or regular keys
160  for (bool encode_as_internal : { true, false }) {
161  // -- Step 1: build table
162  auto collector = new RegularKeysStartWithA();
163  if (encode_as_internal) {
164  options.table_properties_collectors = {
165  std::make_shared<UserKeyTablePropertiesCollector>(collector)
166  };
167  } else {
168  options.table_properties_collectors.resize(1);
169  options.table_properties_collectors[0].reset(collector);
170  }
171  std::unique_ptr<TableBuilder> builder;
172  std::unique_ptr<FakeWritableFile> writable;
173  MakeBuilder(options, &writable, &builder);
174 
175  for (const auto& kv : kvs) {
176  if (encode_as_internal) {
177  InternalKey ikey(kv.first, 0, ValueType::kTypeValue);
178  builder->Add(ikey.Encode(), kv.second);
179  } else {
180  builder->Add(kv.first, kv.second);
181  }
182  }
183  ASSERT_OK(builder->Finish());
184 
185  // -- Step 2: Open table
186  std::unique_ptr<TableReader> table_reader;
187  OpenTable(options, writable->contents(), &table_reader);
188  const auto& properties =
189  table_reader->GetTableProperties().user_collected_properties;
190 
191  ASSERT_EQ("Rocksdb", properties.at("TablePropertiesTest"));
192 
193  uint32_t starts_with_A = 0;
194  Slice key(properties.at("Count"));
195  ASSERT_TRUE(GetVarint32(&key, &starts_with_A));
196  ASSERT_EQ(3u, starts_with_A);
197  }
198 }

Here is the call graph for this function:

rocksdb::TEST ( Coding  ,
Varint32Truncation   
)

Definition at line 146 of file coding_test.cc.

References ASSERT_EQ, ASSERT_TRUE, GetVarint32Ptr(), and PutVarint32().

146  {
147  uint32_t large_value = (1u << 31) + 100;
148  std::string s;
149  PutVarint32(&s, large_value);
150  uint32_t result;
151  for (unsigned int len = 0; len < s.size() - 1; len++) {
152  ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
153  }
154  ASSERT_TRUE(
155  GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
156  ASSERT_EQ(large_value, result);
157 }

Here is the call graph for this function:

rocksdb::TEST ( DeleteFileTest  ,
AddKeysAndQueryLevels   
)

Definition at line 147 of file deletefile_test.cc.

References ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, db_, rocksdb::DBImpl::DeleteFile(), rocksdb::DBImpl::GetLiveFilesMetaData(), and rocksdb::Status::IsInvalidArgument().

147  {
148  CreateTwoLevels();
149  std::vector<LiveFileMetaData> metadata;
150  std::vector<int> keysinlevel;
151  db_->GetLiveFilesMetaData(&metadata);
152 
153  std::string level1file = "";
154  int level1keycount = 0;
155  std::string level2file = "";
156  int level2keycount = 0;
157  int level1index = 0;
158  int level2index = 1;
159 
160  ASSERT_EQ((int)metadata.size(), 2);
161  if (metadata[0].level == 2) {
162  level1index = 1;
163  level2index = 0;
164  }
165 
166  level1file = metadata[level1index].name;
167  int startkey = atoi(metadata[level1index].smallestkey.c_str());
168  int endkey = atoi(metadata[level1index].largestkey.c_str());
169  level1keycount = (endkey - startkey + 1);
170  level2file = metadata[level2index].name;
171  startkey = atoi(metadata[level2index].smallestkey.c_str());
172  endkey = atoi(metadata[level2index].largestkey.c_str());
173  level2keycount = (endkey - startkey + 1);
174 
175  // Controlled setup. Levels 1 and 2 should both have 50K files.
176  // This is a little fragile as it depends on the current
177  // compaction heuristics.
178  ASSERT_EQ(level1keycount, 50000);
179  ASSERT_EQ(level2keycount, 50000);
180 
181  Status status = db_->DeleteFile("0.sst");
182  ASSERT_TRUE(status.IsInvalidArgument());
183 
184  // intermediate level files cannot be deleted.
185  status = db_->DeleteFile(level1file);
186  ASSERT_TRUE(status.IsInvalidArgument());
187 
188  // Lowest level file deletion should succeed.
189  ASSERT_OK(db_->DeleteFile(level2file));
190 
191  CloseDB();
192 }

Here is the call graph for this function:

rocksdb::TEST ( PerfContextTest  ,
StopWatchOverhead   
)

Definition at line 147 of file perf_context_test.cc.

References rocksdb::HistogramImpl::Add(), rocksdb::Env::Default(), rocksdb::StopWatch::ElapsedMicros(), and rocksdb::HistogramImpl::ToString().

147  {
148  // profile the timer cost by itself!
149  const int kTotalIterations = 1000000;
150  std::vector<uint64_t> timings(kTotalIterations);
151 
152  StopWatch timer(Env::Default());
153  for (auto& timing : timings) {
154  timing = timer.ElapsedMicros();
155  }
156 
157  HistogramImpl histogram;
158  uint64_t prev_timing = 0;
159  for (const auto timing : timings) {
160  histogram.Add(timing - prev_timing);
161  prev_timing = timing;
162  }
163 
164  std::cout << histogram.ToString();
165 }

Here is the call graph for this function:

rocksdb::TEST ( AutoRollLoggerTest  ,
OpenLogFilesMultipleTimesWithOptionLog_max_size   
)

Definition at line 150 of file auto_roll_logger_test.cc.

References ASSERT_EQ, ASSERT_GT, rocksdb::AutoRollLogger::GetLogFileSize(), logger, and LogMessage().

151  {
152  // If only 'log_max_size' options is specified, then every time
153  // when rocksdb is restarted, a new empty log file will be created.
154  InitTestDb();
155  // WORKAROUND:
156  // avoid compiler's complaint of "comparison between signed
157  // and unsigned integer expressions" because literal 0 is
158  // treated as "signed".
159  size_t kZero = 0;
160  size_t log_size = 1024;
161 
162  AutoRollLogger* logger = new AutoRollLogger(
163  Env::Default(), kTestDir, "", log_size, 0);
164 
165  LogMessage(logger, kSampleMessage.c_str());
166  ASSERT_GT(logger->GetLogFileSize(), kZero);
167  delete logger;
168 
169  // reopens the log file and an empty log file will be created.
170  logger = new AutoRollLogger(
171  Env::Default(), kTestDir, "", log_size, 0);
172  ASSERT_EQ(logger->GetLogFileSize(), kZero);
173  delete logger;
174 }

Here is the call graph for this function:

rocksdb::TEST ( CacheTest  ,
EntriesArePinned   
)

Definition at line 152 of file cache_test.cc.

References ASSERT_EQ, DecodeValue(), and EncodeKey().

152  {
153  Insert(100, 101);
154  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
155  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
156 
157  Insert(100, 102);
158  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
159  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
160  ASSERT_EQ(0U, deleted_keys_.size());
161 
162  cache_->Release(h1);
163  ASSERT_EQ(1U, deleted_keys_.size());
164  ASSERT_EQ(100, deleted_keys_[0]);
165  ASSERT_EQ(101, deleted_values_[0]);
166 
167  Erase(100);
168  ASSERT_EQ(-1, Lookup(100));
169  ASSERT_EQ(1U, deleted_keys_.size());
170 
171  cache_->Release(h2);
172  ASSERT_EQ(2U, deleted_keys_.size());
173  ASSERT_EQ(100, deleted_keys_[1]);
174  ASSERT_EQ(102, deleted_values_[1]);
175 }

Here is the call graph for this function:

rocksdb::TEST ( FindFileTest  ,
OverlapSequenceChecks   
)

Definition at line 153 of file version_set_test.cc.

References ASSERT_TRUE.

153  {
154  Add("200", "200", 5000, 3000);
155  ASSERT_TRUE(! Overlaps("199", "199"));
156  ASSERT_TRUE(! Overlaps("201", "300"));
157  ASSERT_TRUE(Overlaps("200", "200"));
158  ASSERT_TRUE(Overlaps("190", "200"));
159  ASSERT_TRUE(Overlaps("200", "210"));
160 }
rocksdb::TEST ( MemEnvTest  ,
LargeWrite   
)

Definition at line 154 of file memenv_test.cc.

References rocksdb::WritableFile::Append(), ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, rocksdb::Slice::compare(), rocksdb::Slice::data(), env_, rocksdb::Env::NewSequentialFile(), rocksdb::Env::NewWritableFile(), beast::asio::InputParser::read(), rocksdb::SequentialFile::Read(), rocksdb::Slice::size(), and soptions_.

154  {
155  const size_t kWriteSize = 300 * 1024;
156  char* scratch = new char[kWriteSize * 2];
157 
158  std::string write_data;
159  for (size_t i = 0; i < kWriteSize; ++i) {
160  write_data.append(1, static_cast<char>(i));
161  }
162 
163  unique_ptr<WritableFile> writable_file;
164  ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file, soptions_));
165  ASSERT_OK(writable_file->Append("foo"));
166  ASSERT_OK(writable_file->Append(write_data));
167  writable_file.reset();
168 
169  unique_ptr<SequentialFile> seq_file;
170  Slice result;
171  ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file, soptions_));
172  ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
173  ASSERT_EQ(0, result.compare("foo"));
174 
175  size_t read = 0;
176  std::string read_data;
177  while (read < kWriteSize) {
178  ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch));
179  read_data.append(result.data(), result.size());
180  read += result.size();
181  }
182  ASSERT_TRUE(write_data == read_data);
183  delete [] scratch;
184 }

Here is the call graph for this function:

rocksdb::TEST ( Coding  ,
Varint64Overflow   
)

Definition at line 159 of file coding_test.cc.

References ASSERT_TRUE, and GetVarint64Ptr().

159  {
160  uint64_t result;
161  std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
162  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result)
163  == nullptr);
164 }

Here is the call graph for this function:

rocksdb::TEST ( WriteBatchTest  ,
Blob   
)

Definition at line 161 of file write_batch_test.cc.

References ASSERT_EQ, rocksdb::WriteBatch::Count(), rocksdb::WriteBatch::Delete(), rocksdb::WriteBatch::Iterate(), rocksdb::WriteBatch::Merge(), PrintContents(), rocksdb::WriteBatch::Put(), and rocksdb::WriteBatch::PutLogData().

161  {
162  WriteBatch batch;
163  batch.Put(Slice("k1"), Slice("v1"));
164  batch.Put(Slice("k2"), Slice("v2"));
165  batch.Put(Slice("k3"), Slice("v3"));
166  batch.PutLogData(Slice("blob1"));
167  batch.Delete(Slice("k2"));
168  batch.PutLogData(Slice("blob2"));
169  batch.Merge(Slice("foo"), Slice("bar"));
170  ASSERT_EQ(5, batch.Count());
171  ASSERT_EQ("Merge(foo, bar)@4"
172  "Put(k1, v1)@0"
173  "Delete(k2)@3"
174  "Put(k2, v2)@1"
175  "Put(k3, v3)@2",
176  PrintContents(&batch));
177 
178  TestHandler handler;
179  batch.Iterate(&handler);
180  ASSERT_EQ(
181  "Put(k1, v1)"
182  "Put(k2, v2)"
183  "Put(k3, v3)"
184  "LogData(blob1)"
185  "Delete(k2)"
186  "LogData(blob2)"
187  "Merge(foo, bar)",
188  handler.seen);
189 }

Here is the call graph for this function:

rocksdb::TEST ( FindFileTest  ,
OverlappingFiles   
)

Definition at line 162 of file version_set_test.cc.

References ASSERT_TRUE.

162  {
163  Add("150", "600");
164  Add("400", "500");
165  disjoint_sorted_files_ = false;
166  ASSERT_TRUE(! Overlaps("100", "149"));
167  ASSERT_TRUE(! Overlaps("601", "700"));
168  ASSERT_TRUE(Overlaps("100", "150"));
169  ASSERT_TRUE(Overlaps("100", "200"));
170  ASSERT_TRUE(Overlaps("100", "300"));
171  ASSERT_TRUE(Overlaps("100", "400"));
172  ASSERT_TRUE(Overlaps("100", "500"));
173  ASSERT_TRUE(Overlaps("375", "400"));
174  ASSERT_TRUE(Overlaps("450", "450"));
175  ASSERT_TRUE(Overlaps("450", "500"));
176  ASSERT_TRUE(Overlaps("450", "700"));
177  ASSERT_TRUE(Overlaps("600", "700"));
178 }
rocksdb::TEST ( Coding  ,
Varint64Truncation   
)

Definition at line 166 of file coding_test.cc.

References ASSERT_EQ, ASSERT_TRUE, GetVarint64Ptr(), and PutVarint64().

166  {
167  uint64_t large_value = (1ull << 63) + 100ull;
168  std::string s;
169  PutVarint64(&s, large_value);
170  uint64_t result;
171  for (unsigned int len = 0; len < s.size() - 1; len++) {
172  ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
173  }
174  ASSERT_TRUE(
175  GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
176  ASSERT_EQ(large_value, result);
177 }

Here is the call graph for this function:

rocksdb::TEST ( RedisListsTest  ,
RangeTest   
)

Definition at line 170 of file redis_lists_test.cc.

References ASSERT_EQ, ASSERT_TRUE, rocksdb::RedisLists::Index(), rocksdb::RedisLists::Length(), rocksdb::RedisLists::PushLeft(), rocksdb::RedisLists::PushRight(), and rocksdb::RedisLists::Range().

170  {
171  RedisLists redis(kDefaultDbName, options, true); // Destructive
172 
173  string tempv; // Used below for all Index(), PopRight(), PopLeft()
174 
175  // Simple Pushes (will yield: [v6, v4, v4, v1, v2, v3])
176  redis.PushRight("k1", "v1");
177  redis.PushRight("k1", "v2");
178  redis.PushRight("k1", "v3");
179  redis.PushLeft("k1", "v4");
180  redis.PushLeft("k1", "v4");
181  redis.PushLeft("k1", "v6");
182 
183  // Sanity check (check the length; make sure it's 6)
184  ASSERT_EQ(redis.Length("k1"), 6);
185 
186  // Simple range
187  std::vector<std::string> res = redis.Range("k1", 1, 4);
188  ASSERT_EQ((int)res.size(), 4);
189  ASSERT_EQ(res[0], "v4");
190  ASSERT_EQ(res[1], "v4");
191  ASSERT_EQ(res[2], "v1");
192  ASSERT_EQ(res[3], "v2");
193 
194  // Negative indices (i.e.: measured from the end)
195  res = redis.Range("k1", 2, -1);
196  ASSERT_EQ((int)res.size(), 4);
197  ASSERT_EQ(res[0], "v4");
198  ASSERT_EQ(res[1], "v1");
199  ASSERT_EQ(res[2], "v2");
200  ASSERT_EQ(res[3], "v3");
201 
202  res = redis.Range("k1", -6, -4);
203  ASSERT_EQ((int)res.size(), 3);
204  ASSERT_EQ(res[0], "v6");
205  ASSERT_EQ(res[1], "v4");
206  ASSERT_EQ(res[2], "v4");
207 
208  res = redis.Range("k1", -1, 5);
209  ASSERT_EQ((int)res.size(), 1);
210  ASSERT_EQ(res[0], "v3");
211 
212  // Partial / Broken indices
213  res = redis.Range("k1", -3, 1000000);
214  ASSERT_EQ((int)res.size(), 3);
215  ASSERT_EQ(res[0], "v1");
216  ASSERT_EQ(res[1], "v2");
217  ASSERT_EQ(res[2], "v3");
218 
219  res = redis.Range("k1", -1000000, 1);
220  ASSERT_EQ((int)res.size(), 2);
221  ASSERT_EQ(res[0], "v6");
222  ASSERT_EQ(res[1], "v4");
223 
224  // Invalid indices
225  res = redis.Range("k1", 7, 9);
226  ASSERT_EQ((int)res.size(), 0);
227 
228  res = redis.Range("k1", -8, -7);
229  ASSERT_EQ((int)res.size(), 0);
230 
231  res = redis.Range("k1", 3, 2);
232  ASSERT_EQ((int)res.size(), 0);
233 
234  res = redis.Range("k1", 5, -2);
235  ASSERT_EQ((int)res.size(), 0);
236 
237  // Range matches Index
238  res = redis.Range("k1", -6, -4);
239  ASSERT_TRUE(redis.Index("k1", -6, &tempv));
240  ASSERT_EQ(tempv, res[0]);
241  ASSERT_TRUE(redis.Index("k1", -5, &tempv));
242  ASSERT_EQ(tempv, res[1]);
243  ASSERT_TRUE(redis.Index("k1", -4, &tempv));
244  ASSERT_EQ(tempv, res[2]);
245 
246  // Last check
247  res = redis.Range("k1", 0, -6);
248  ASSERT_EQ((int)res.size(), 1);
249  ASSERT_EQ(res[0], "v6");
250 }

Here is the call graph for this function:

rocksdb::TEST ( BlobStoreTest  ,
MaxSizeTest   
)

Definition at line 171 of file blob_store_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Env::Default(), rocksdb::Status::ok(), rocksdb::BlobStore::Put(), rocksdb::test::RandomString(), and rocksdb::test::TmpDir().

171  {
172  const uint64_t block_size = 10;
173  const uint32_t blocks_per_file = 100;
174  const int max_buckets = 10;
175  Random random(5);
176 
177  BlobStore blob_store(test::TmpDir() + "/blob_store_test",
178  block_size,
179  blocks_per_file,
180  max_buckets,
181  Env::Default());
182  string buf;
183  for (int i = 0; i < max_buckets; ++i) {
184  test::RandomString(&random, 1000, &buf);
185  Blob r;
186  ASSERT_OK(blob_store.Put(Slice(buf), &r));
187  }
188 
189  test::RandomString(&random, 1000, &buf);
190  Blob r;
191  // should fail because max size
192  Status s = blob_store.Put(Slice(buf), &r);
193  ASSERT_EQ(s.ok(), false);
194 }

Here is the call graph for this function:

rocksdb::TEST ( AutoRollLoggerTest  ,
CompositeRollByTimeAndSizeLogger   
)

Definition at line 176 of file auto_roll_logger_test.cc.

References logger.

176  {
177  size_t time = 1, log_max_size = 1024 * 5;
178 
179  InitTestDb();
180 
181  AutoRollLogger logger(Env::Default(), kTestDir, "", log_max_size, time);
182 
183  // Test the ability to roll by size
184  RollLogFileBySizeTest(
185  &logger, log_max_size,
186  kSampleMessage + ":CompositeRollByTimeAndSizeLogger");
187 
188  // Test the ability to roll by Time
189  RollLogFileByTimeTest( &logger, time,
190  kSampleMessage + ":CompositeRollByTimeAndSizeLogger");
191 }
rocksdb::TEST ( CacheTest  ,
EvictionPolicy   
)

Definition at line 177 of file cache_test.cc.

References ASSERT_EQ.

177  {
178  Insert(100, 101);
179  Insert(200, 201);
180 
181  // Frequently used entry must be kept around
182  for (int i = 0; i < kCacheSize + 100; i++) {
183  Insert(1000+i, 2000+i);
184  ASSERT_EQ(2000+i, Lookup(1000+i));
185  ASSERT_EQ(101, Lookup(100));
186  }
187  ASSERT_EQ(101, Lookup(100));
188  ASSERT_EQ(-1, Lookup(200));
189 }
rocksdb::TEST ( Coding  ,
Strings   
)

Definition at line 179 of file coding_test.cc.

References ASSERT_EQ, ASSERT_TRUE, GetLengthPrefixedSlice(), PutLengthPrefixedSlice(), and rocksdb::Slice::ToString().

179  {
180  std::string s;
181  PutLengthPrefixedSlice(&s, Slice(""));
182  PutLengthPrefixedSlice(&s, Slice("foo"));
183  PutLengthPrefixedSlice(&s, Slice("bar"));
184  PutLengthPrefixedSlice(&s, Slice(std::string(200, 'x')));
185 
186  Slice input(s);
187  Slice v;
188  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
189  ASSERT_EQ("", v.ToString());
190  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
191  ASSERT_EQ("foo", v.ToString());
192  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
193  ASSERT_EQ("bar", v.ToString());
194  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
195  ASSERT_EQ(std::string(200, 'x'), v.ToString());
196  ASSERT_EQ("", input.ToString());
197 }

Here is the call graph for this function:

rocksdb::TEST ( MemEnvTest  ,
DBTest   
)

Definition at line 186 of file memenv_test.cc.

References ASSERT_OK, ASSERT_TRUE, rocksdb::Options::create_if_missing, db, rocksdb::Options::env, env_, rocksdb::DB::Get(), rocksdb::Iterator::key(), rocksdb::DB::NewIterator(), rocksdb::Iterator::Next(), rocksdb::DB::Open(), rocksdb::DB::Put(), rocksdb::Iterator::SeekToFirst(), rocksdb::DBImpl::TEST_FlushMemTable(), rocksdb::Iterator::Valid(), and rocksdb::Iterator::value().

186  {
187  Options options;
188  options.create_if_missing = true;
189  options.env = env_;
190  DB* db;
191 
192  const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
193  const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};
194 
195  ASSERT_OK(DB::Open(options, "/dir/db", &db));
196  for (size_t i = 0; i < 3; ++i) {
197  ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i]));
198  }
199 
200  for (size_t i = 0; i < 3; ++i) {
201  std::string res;
202  ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
203  ASSERT_TRUE(res == vals[i]);
204  }
205 
206  Iterator* iterator = db->NewIterator(ReadOptions());
207  iterator->SeekToFirst();
208  for (size_t i = 0; i < 3; ++i) {
209  ASSERT_TRUE(iterator->Valid());
210  ASSERT_TRUE(keys[i] == iterator->key());
211  ASSERT_TRUE(vals[i] == iterator->value());
212  iterator->Next();
213  }
214  ASSERT_TRUE(!iterator->Valid());
215  delete iterator;
216 
217  DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
218  ASSERT_OK(dbi->TEST_FlushMemTable());
219 
220  for (size_t i = 0; i < 3; ++i) {
221  std::string res;
222  ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
223  ASSERT_TRUE(res == vals[i]);
224  }
225 
226  delete db;
227 }

Here is the call graph for this function:

rocksdb::TEST ( CacheTest  ,
EvictionPolicyRef   
)

Definition at line 191 of file cache_test.cc.

References ASSERT_EQ, and EncodeKey().

191  {
192  Insert(100, 101);
193  Insert(101, 102);
194  Insert(102, 103);
195  Insert(103, 104);
196  Insert(200, 101);
197  Insert(201, 102);
198  Insert(202, 103);
199  Insert(203, 104);
200  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
201  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
202  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
203  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
204  Insert(300, 101);
205  Insert(301, 102);
206  Insert(302, 103);
207  Insert(303, 104);
208 
209  // Insert entries much more than Cache capacity
210  for (int i = 0; i < kCacheSize + 100; i++) {
211  Insert(1000 + i, 2000 + i);
212  }
213 
 214  // Check whether the entries inserted at the beginning
 215  // are evicted. Ones without an extra ref are evicted and
 216  // those with one are not.
217  ASSERT_EQ(-1, Lookup(100));
218  ASSERT_EQ(-1, Lookup(101));
219  ASSERT_EQ(-1, Lookup(102));
220  ASSERT_EQ(-1, Lookup(103));
221 
222  ASSERT_EQ(-1, Lookup(300));
223  ASSERT_EQ(-1, Lookup(301));
224  ASSERT_EQ(-1, Lookup(302));
225  ASSERT_EQ(-1, Lookup(303));
226 
227  ASSERT_EQ(101, Lookup(200));
228  ASSERT_EQ(102, Lookup(201));
229  ASSERT_EQ(103, Lookup(202));
230  ASSERT_EQ(104, Lookup(203));
231 
232  // Cleaning up all the handles
233  cache_->Release(h201);
234  cache_->Release(h202);
235  cache_->Release(h203);
236  cache_->Release(h204);
237 }

Here is the call graph for this function:

rocksdb::TEST ( WriteBatchTest  ,
Continue   
)

Definition at line 191 of file write_batch_test.cc.

References ASSERT_EQ, rocksdb::WriteBatch::Delete(), rocksdb::WriteBatch::Iterate(), rocksdb::WriteBatch::Merge(), rocksdb::WriteBatch::Put(), and rocksdb::WriteBatch::PutLogData().

191  {
192  WriteBatch batch;
193 
194  struct Handler : public TestHandler {
195  int num_seen = 0;
196  virtual void Put(const Slice& key, const Slice& value) {
197  ++num_seen;
198  TestHandler::Put(key, value);
199  }
200  virtual void Merge(const Slice& key, const Slice& value) {
201  ++num_seen;
202  TestHandler::Merge(key, value);
203  }
204  virtual void LogData(const Slice& blob) {
205  ++num_seen;
206  TestHandler::LogData(blob);
207  }
208  virtual void Delete(const Slice& key) {
209  ++num_seen;
210  TestHandler::Delete(key);
211  }
212  virtual bool Continue() override {
213  return num_seen < 3;
214  }
215  } handler;
216 
217  batch.Put(Slice("k1"), Slice("v1"));
218  batch.PutLogData(Slice("blob1"));
219  batch.Delete(Slice("k1"));
220  batch.PutLogData(Slice("blob2"));
221  batch.Merge(Slice("foo"), Slice("bar"));
222  batch.Iterate(&handler);
223  ASSERT_EQ(
224  "Put(k1, v1)"
225  "LogData(blob1)"
226  "Delete(k1)",
227  handler.seen);
228 }

Here is the call graph for this function:

rocksdb::TEST ( AutoRollLoggerTest  ,
CreateLoggerFromOptions   
)

Definition at line 193 of file auto_roll_logger_test.cc.

References ASSERT_OK, ASSERT_TRUE, CreateLoggerFromOptions(), env, rocksdb::Options::log_file_time_to_roll, logger, and rocksdb::Options::max_log_file_size.

193  {
194  Options options;
195  shared_ptr<Logger> logger;
196 
197  // Normal logger
198  ASSERT_OK(CreateLoggerFromOptions(kTestDir, "", env, options, &logger));
199  ASSERT_TRUE(dynamic_cast<PosixLogger*>(logger.get()));
200 
201  // Only roll by size
202  InitTestDb();
203  options.max_log_file_size = 1024;
204  ASSERT_OK(CreateLoggerFromOptions(kTestDir, "", env, options, &logger));
205  AutoRollLogger* auto_roll_logger =
206  dynamic_cast<AutoRollLogger*>(logger.get());
207  ASSERT_TRUE(auto_roll_logger);
208  RollLogFileBySizeTest(
209  auto_roll_logger, options.max_log_file_size,
210  kSampleMessage + ":CreateLoggerFromOptions - size");
211 
212  // Only roll by Time
213  InitTestDb();
214  options.max_log_file_size = 0;
215  options.log_file_time_to_roll = 1;
216  ASSERT_OK(CreateLoggerFromOptions(kTestDir, "", env, options, &logger));
217  auto_roll_logger =
218  dynamic_cast<AutoRollLogger*>(logger.get());
219  RollLogFileByTimeTest(
220  auto_roll_logger, options.log_file_time_to_roll,
221  kSampleMessage + ":CreateLoggerFromOptions - time");
222 
223  // roll by both Time and size
224  InitTestDb();
225  options.max_log_file_size = 1024 * 5;
226  options.log_file_time_to_roll = 1;
227  ASSERT_OK(CreateLoggerFromOptions(kTestDir, "", env, options, &logger));
228  auto_roll_logger =
229  dynamic_cast<AutoRollLogger*>(logger.get());
230  RollLogFileBySizeTest(
231  auto_roll_logger, options.max_log_file_size,
232  kSampleMessage + ":CreateLoggerFromOptions - both");
233  RollLogFileByTimeTest(
234  auto_roll_logger, options.log_file_time_to_roll,
235  kSampleMessage + ":CreateLoggerFromOptions - both");
236 }

Here is the call graph for this function:

rocksdb::TEST ( DeleteFileTest  ,
PurgeObsoleteFilesTest   
)

Definition at line 194 of file deletefile_test.cc.

References rocksdb::DBImpl::CompactRange(), db_, dbname_, beast.util.Iter::first(), rocksdb::DBImpl::NewIterator(), options_, and rocksdb::Options::wal_dir.

194  {
195  CreateTwoLevels();
196  // there should be only one (empty) log file because CreateTwoLevels()
197  // flushes the memtables to disk
198  CheckFileTypeCounts(options_.wal_dir, 1, 0, 0);
199  // 2 ssts, 1 manifest
200  CheckFileTypeCounts(dbname_, 0, 2, 1);
201  std::string first("0"), last("999999");
202  Slice first_slice(first), last_slice(last);
203  db_->CompactRange(&first_slice, &last_slice, true, 2);
204  // 1 sst after compaction
205  CheckFileTypeCounts(dbname_, 0, 1, 1);
206 
207  // this time, we keep an iterator alive
208  ReopenDB(true);
209  Iterator *itr = 0;
210  CreateTwoLevels();
211  itr = db_->NewIterator(ReadOptions());
212  db_->CompactRange(&first_slice, &last_slice, true, 2);
213  // 3 sst after compaction with live iterator
214  CheckFileTypeCounts(dbname_, 0, 3, 1);
215  delete itr;
216  // 1 sst after iterator deletion
217  CheckFileTypeCounts(dbname_, 0, 1, 1);
218 
219  CloseDB();
220 }

Here is the call graph for this function:

rocksdb::TEST ( Coding  ,
BitStream   
)

Definition at line 199 of file coding_test.cc.

References ASSERT_EQ, BitStreamGetInt(), and BitStreamPutInt().

199  {
200  const int kNumBytes = 10;
201  char bytes[kNumBytes+1];
202  for (int i = 0; i < kNumBytes + 1; ++i) {
203  bytes[i] = '\0';
204  }
205 
206  // Simple byte aligned test.
207  for (int i = 0; i < kNumBytes; ++i) {
208  BitStreamPutInt(bytes, kNumBytes, i*8, 8, 255-i);
209 
210  ASSERT_EQ((unsigned char)bytes[i], (unsigned char)(255-i));
211  }
212  for (int i = 0; i < kNumBytes; ++i) {
213  ASSERT_EQ(BitStreamGetInt(bytes, kNumBytes, i*8, 8), (uint32_t)(255-i));
214  }
215  ASSERT_EQ(bytes[kNumBytes], '\0');
216 
217  // Write and read back at strange offsets
218  for (int i = 0; i < kNumBytes + 1; ++i) {
219  bytes[i] = '\0';
220  }
221  for (int i = 0; i < kNumBytes; ++i) {
222  BitStreamPutInt(bytes, kNumBytes, i*5+1, 4, (i * 7) % (1 << 4));
223  }
224  for (int i = 0; i < kNumBytes; ++i) {
225  ASSERT_EQ(BitStreamGetInt(bytes, kNumBytes, i*5+1, 4),
226  (uint32_t)((i * 7) % (1 << 4)));
227  }
228  ASSERT_EQ(bytes[kNumBytes], '\0');
229 
230  // Create 11011011 as a bit pattern
231  for (int i = 0; i < kNumBytes + 1; ++i) {
232  bytes[i] = '\0';
233  }
234  for (int i = 0; i < kNumBytes; ++i) {
235  BitStreamPutInt(bytes, kNumBytes, i*8, 2, 3);
236  BitStreamPutInt(bytes, kNumBytes, i*8+3, 2, 3);
237  BitStreamPutInt(bytes, kNumBytes, i*8+6, 2, 3);
238 
239  ASSERT_EQ((unsigned char)bytes[i],
240  (unsigned char)(3 + (3 << 3) + (3 << 6)));
241  }
242  ASSERT_EQ(bytes[kNumBytes], '\0');
243 
244 
245  // Test large values
246  for (int i = 0; i < kNumBytes + 1; ++i) {
247  bytes[i] = '\0';
248  }
249  BitStreamPutInt(bytes, kNumBytes, 0, 64, (uint64_t)(-1));
250  for (int i = 0; i < 64/8; ++i) {
251  ASSERT_EQ((unsigned char)bytes[i],
252  (unsigned char)(255));
253  }
254  ASSERT_EQ(bytes[64/8], '\0');
255 
256 
257 }

Here is the call graph for this function:

rocksdb::TEST ( TablePropertiesTest  ,
InternalKeyPropertiesCollector   
)

Definition at line 200 of file table_properties_collector_test.cc.

References rocksdb::TableBuilder::Add(), ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, comparator, rocksdb::Options::comparator, rocksdb::FakeWritableFile::contents(), rocksdb::TableBuilder::Finish(), GetDeletedKeys(), GetVarint32(), rocksdb::Options::info_log, kTypeDeletion, kTypeValue, MakeBuilder(), OpenTable(), SanitizeOptions(), and rocksdb::Options::table_properties_collectors.

200  {
201  InternalKey keys[] = {
202  InternalKey("A", 0, ValueType::kTypeValue),
203  InternalKey("B", 0, ValueType::kTypeValue),
204  InternalKey("C", 0, ValueType::kTypeValue),
205  InternalKey("W", 0, ValueType::kTypeDeletion),
206  InternalKey("X", 0, ValueType::kTypeDeletion),
207  InternalKey("Y", 0, ValueType::kTypeDeletion),
208  InternalKey("Z", 0, ValueType::kTypeDeletion),
209  };
210 
211  for (bool sanitized : { false, true }) {
212  std::unique_ptr<TableBuilder> builder;
213  std::unique_ptr<FakeWritableFile> writable;
214  Options options;
215  if (sanitized) {
216  options.table_properties_collectors = {
217  std::make_shared<RegularKeysStartWithA>()
218  };
 219  // with sanitization, even a regular properties collector will be able to
 220  // handle internal keys.
221  auto comparator = options.comparator;
222  // HACK: Set options.info_log to avoid writing log in
223  // SanitizeOptions().
224  options.info_log = std::make_shared<DumbLogger>();
225  options = SanitizeOptions(
226  "db", // just a place holder
227  nullptr, // with skip internal key comparator
228  nullptr, // don't care filter policy
229  options
230  );
231  options.comparator = comparator;
232  } else {
233  options.table_properties_collectors = {
234  std::make_shared<InternalKeyPropertiesCollector>()
235  };
236  }
237 
238  MakeBuilder(options, &writable, &builder);
239  for (const auto& k : keys) {
240  builder->Add(k.Encode(), "val");
241  }
242 
243  ASSERT_OK(builder->Finish());
244 
245  std::unique_ptr<TableReader> table_reader;
246  OpenTable(options, writable->contents(), &table_reader);
247  const auto& properties =
248  table_reader->GetTableProperties().user_collected_properties;
249 
250  uint64_t deleted = GetDeletedKeys(properties);
251  ASSERT_EQ(4u, deleted);
252 
253  if (sanitized) {
254  uint32_t starts_with_A = 0;
255  Slice key(properties.at("Count"));
256  ASSERT_TRUE(GetVarint32(&key, &starts_with_A));
257  ASSERT_EQ(1u, starts_with_A);
258  }
259  }
260 }

Here is the call graph for this function:

rocksdb::TEST ( CorruptionTest  ,
Recovery   
)

Definition at line 201 of file corruption_test.cc.

References rocksdb::log::kBlockSize, and kLogFile.

201  {
202  Build(100);
203  Check(100, 100);
204  Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record
205  Corrupt(kLogFile, log::kBlockSize + 1000, 1); // Somewhere in second block
206  Reopen();
207 
208  // The 64 records in the first two log blocks are completely lost.
209  Check(36, 36);
210 }
rocksdb::TEST ( PerfContextTest  ,
KeyComparisonCount   
)

Definition at line 207 of file perf_context_test.cc.

References kDisable, kEnableCount, kEnableTime, ProfileKeyComparison(), and SetPerfLevel().

207  {
210 
213 
216 }

Here is the call graph for this function:

rocksdb::TEST ( CorruptionTest  ,
RecoverWriteError   
)

Definition at line 212 of file corruption_test.cc.

References ASSERT_TRUE, env_, and rocksdb::Status::ok().

212  {
213  env_.writable_file_error_ = true;
214  Status s = TryReopen();
215  ASSERT_TRUE(!s.ok());
216 }

Here is the call graph for this function:

rocksdb::TEST ( CorruptionTest  ,
NewFileErrorDuringWrite   
)

Definition at line 218 of file corruption_test.cc.

References ASSERT_GE, ASSERT_TRUE, db_, env_, kValueSize, rocksdb::Status::ok(), rocksdb::WriteBatch::Put(), rocksdb::crc32c::Value(), rocksdb::DBImpl::Write(), and rocksdb::Options::write_buffer_size.

218  {
219  // Do enough writing to force minor compaction
220  env_.writable_file_error_ = true;
221  const int num = 3 + (Options().write_buffer_size / kValueSize);
222  std::string value_storage;
223  Status s;
224  bool failed = false;
225  for (int i = 0; i < num; i++) {
226  WriteBatch batch;
227  batch.Put("a", Value(100, &value_storage));
228  s = db_->Write(WriteOptions(), &batch);
229  if (!s.ok()) {
230  failed = true;
231  }
232  ASSERT_TRUE(!failed || !s.ok());
233  }
234  ASSERT_TRUE(!s.ok());
235  ASSERT_GE(env_.num_writable_file_errors_, 1);
236  env_.writable_file_error_ = false;
237  Reopen();
238 }

Here is the call graph for this function:

rocksdb::TEST ( PrefixTest  ,
PrefixHash   
)

Definition at line 221 of file prefix_test.cc.

References rocksdb::HistogramImpl::Add(), ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, db, rocksdb::Env::Default(), rocksdb::DBImpl::Delete(), DestroyDB(), rocksdb::StopWatchNano::ElapsedNanos(), kDbName, rocksdb::DBImpl::NewIterator(), OpenDb(), perf_context, rocksdb::TestKey::prefix, std::chrono::prefix, rocksdb::ReadOptions::prefix, rocksdb::DBImpl::Put(), rocksdb::PerfContext::Reset(), SliceToTestKey(), TestKeyToSlice(), beast::IP::to_string(), rocksdb::HistogramImpl::ToString(), rocksdb::PerfContext::user_key_comparison_count, and value.

221  {
222 
223  DestroyDB(kDbName, Options());
224  auto db = OpenDb();
225  WriteOptions write_options;
226  ReadOptions read_options;
227 
228  std::vector<uint64_t> prefixes;
229  for (uint64_t i = 0; i < FLAGS_total_prefixes; ++i) {
230  prefixes.push_back(i);
231  }
232 
233  if (FLAGS_random_prefix) {
234  std::random_shuffle(prefixes.begin(), prefixes.end());
235  }
236 
 237  // insert x random prefixes, each with y consecutive elements.
238  HistogramImpl hist_put_time;
239  HistogramImpl hist_put_comparison;
240 
241  for (auto prefix : prefixes) {
242  for (uint64_t sorted = 0; sorted < FLAGS_items_per_prefix; sorted++) {
243  TestKey test_key(prefix, sorted);
244 
245  Slice key = TestKeyToSlice(test_key);
246  std::string value = "v" + std::to_string(sorted);
247 
249  StopWatchNano timer(Env::Default(), true);
250  ASSERT_OK(db->Put(write_options, key, value));
251  hist_put_time.Add(timer.ElapsedNanos());
252  hist_put_comparison.Add(perf_context.user_key_comparison_count);
253  }
254  }
255 
256  std::cout << "Put key comparison: \n" << hist_put_comparison.ToString()
257  << "Put time: \n" << hist_put_time.ToString();
258 
259 
260  // test seek existing keys
261  HistogramImpl hist_seek_time;
262  HistogramImpl hist_seek_comparison;
263 
264  for (auto prefix : prefixes) {
265  TestKey test_key(prefix, 0);
266  Slice key = TestKeyToSlice(test_key);
267  std::string value = "v" + std::to_string(0);
268 
269  Slice key_prefix;
270  if (FLAGS_use_prefix_hash_memtable) {
271  key_prefix = options.prefix_extractor->Transform(key);
272  read_options.prefix = &key_prefix;
273  }
274  std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
275 
277  StopWatchNano timer(Env::Default(), true);
278  uint64_t total_keys = 0;
279  for (iter->Seek(key); iter->Valid(); iter->Next()) {
280  if (FLAGS_trigger_deadlock) {
281  std::cout << "Behold the deadlock!\n";
282  db->Delete(write_options, iter->key());
283  }
284  auto test_key = SliceToTestKey(iter->key());
285  if (test_key->prefix != prefix) break;
286  total_keys++;
287  }
288  hist_seek_time.Add(timer.ElapsedNanos());
289  hist_seek_comparison.Add(perf_context.user_key_comparison_count);
290  ASSERT_EQ(total_keys, FLAGS_items_per_prefix);
291  }
292 
293  std::cout << "Seek key comparison: \n"
294  << hist_seek_comparison.ToString()
295  << "Seek time: \n"
296  << hist_seek_time.ToString();
297 
298  // test non-existing keys
299  HistogramImpl hist_no_seek_time;
300  HistogramImpl hist_no_seek_comparison;
301 
302  for (auto prefix = FLAGS_total_prefixes;
303  prefix < FLAGS_total_prefixes + 100;
304  prefix++) {
305  TestKey test_key(prefix, 0);
306  Slice key = TestKeyToSlice(test_key);
307 
308  if (FLAGS_use_prefix_hash_memtable) {
309  Slice key_prefix = options.prefix_extractor->Transform(key);
310  read_options.prefix = &key_prefix;
311  }
312  std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
313 
315  StopWatchNano timer(Env::Default(), true);
316  iter->Seek(key);
317  hist_no_seek_time.Add(timer.ElapsedNanos());
318  hist_no_seek_comparison.Add(perf_context.user_key_comparison_count);
319  ASSERT_TRUE(!iter->Valid());
320  }
321 
322  std::cout << "non-existing Seek key comparison: \n"
323  << hist_no_seek_comparison.ToString()
324  << "non-existing Seek time: \n"
325  << hist_no_seek_time.ToString();
326 }

Here is the call graph for this function:

rocksdb::TEST ( DeleteFileTest  ,
DeleteFileWithIterator   
)

Definition at line 222 of file deletefile_test.cc.

References ASSERT_EQ, ASSERT_TRUE, db_, rocksdb::DBImpl::DeleteFile(), rocksdb::DBImpl::GetLiveFilesMetaData(), rocksdb::DBImpl::NewIterator(), rocksdb::Iterator::Next(), rocksdb::Status::ok(), rocksdb::Iterator::SeekToFirst(), rocksdb::Status::ToString(), and rocksdb::Iterator::Valid().

222  {
223  CreateTwoLevels();
224  ReadOptions options;
225  Iterator* it = db_->NewIterator(options);
226  std::vector<LiveFileMetaData> metadata;
227  db_->GetLiveFilesMetaData(&metadata);
228 
229  std::string level2file = "";
230 
231  ASSERT_EQ((int)metadata.size(), 2);
232  if (metadata[0].level == 1) {
233  level2file = metadata[1].name;
234  } else {
235  level2file = metadata[0].name;
236  }
237 
238  Status status = db_->DeleteFile(level2file);
239  fprintf(stdout, "Deletion status %s: %s\n",
240  level2file.c_str(), status.ToString().c_str());
241  ASSERT_TRUE(status.ok());
242  it->SeekToFirst();
243  int numKeysIterated = 0;
244  while(it->Valid()) {
245  numKeysIterated++;
246  it->Next();
247  }
248  ASSERT_EQ(numKeysIterated, 50000);
249  delete it;
250  CloseDB();
251 }

Here is the call graph for this function:

rocksdb::TEST ( StringAppendOperatorTest  ,
SimpleTest   
)

Definition at line 222 of file stringappend_test.cc.

References rocksdb::StringLists::Append(), ASSERT_EQ, ASSERT_TRUE, db, rocksdb::StringLists::Get(), and OpenDb().

222  {
223  auto db = OpenDb(',');
224  StringLists slists(db);
225 
226  slists.Append("k1", "v1");
227  slists.Append("k1", "v2");
228  slists.Append("k1", "v3");
229 
230  std::string res;
231  bool status = slists.Get("k1", &res);
232 
233  ASSERT_TRUE(status);
234  ASSERT_EQ(res, "v1,v2,v3");
235 }

Here is the call graph for this function:

rocksdb::TEST ( PerfContextTest  ,
SeekKeyComparison   
)

Definition at line 230 of file perf_context_test.cc.

References rocksdb::HistogramImpl::Add(), ASSERT_EQ, ASSERT_TRUE, db, rocksdb::Env::Default(), DestroyDB(), rocksdb::StopWatchNano::ElapsedNanos(), FLAGS_random_key, FLAGS_total_keys, kDbName, kEnableTime, rocksdb::DBImpl::NewIterator(), OpenDb(), perf_context, rocksdb::DBImpl::Put(), rocksdb::PerfContext::Reset(), SetPerfLevel(), rocksdb::StopWatchNano::Start(), beast::IP::to_string(), rocksdb::HistogramImpl::ToString(), rocksdb::PerfContext::user_key_comparison_count, value, and rocksdb::PerfContext::wal_write_time.

230  {
231  DestroyDB(kDbName, Options());
232  auto db = OpenDb();
233  WriteOptions write_options;
234  ReadOptions read_options;
235 
236  std::cout << "Inserting " << FLAGS_total_keys << " key/value pairs\n...\n";
237 
238  std::vector<int> keys;
239  for (int i = 0; i < FLAGS_total_keys; ++i) {
240  keys.push_back(i);
241  }
242 
243  if (FLAGS_random_key) {
244  std::random_shuffle(keys.begin(), keys.end());
245  }
246 
247  HistogramImpl hist_put_time;
248  HistogramImpl hist_wal_time;
249  HistogramImpl hist_time_diff;
250 
252  StopWatchNano timer(Env::Default());
253  for (const int i : keys) {
254  std::string key = "k" + std::to_string(i);
255  std::string value = "v" + std::to_string(i);
256 
258  timer.Start();
259  db->Put(write_options, key, value);
260  auto put_time = timer.ElapsedNanos();
261  hist_put_time.Add(put_time);
262  hist_wal_time.Add(perf_context.wal_write_time);
263  hist_time_diff.Add(put_time - perf_context.wal_write_time);
264  }
265 
266  std::cout << "Put time:\n" << hist_put_time.ToString()
267  << "WAL time:\n" << hist_wal_time.ToString()
268  << "time diff:\n" << hist_time_diff.ToString();
269 
270  HistogramImpl hist_seek;
271  HistogramImpl hist_next;
272 
273  for (int i = 0; i < FLAGS_total_keys; ++i) {
274  std::string key = "k" + std::to_string(i);
275  std::string value = "v" + std::to_string(i);
276 
277  std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
279  iter->Seek(key);
280  ASSERT_TRUE(iter->Valid());
281  ASSERT_EQ(iter->value().ToString(), value);
283  }
284 
285  std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
286  for (iter->SeekToFirst(); iter->Valid();) {
288  iter->Next();
290  }
291 
292  std::cout << "Seek:\n" << hist_seek.ToString()
293  << "Next:\n" << hist_next.ToString();
294 }

Here is the call graph for this function:

rocksdb::TEST ( WriteBatchTest  ,
PutGatherSlices   
)

Definition at line 230 of file write_batch_test.cc.

References ASSERT_EQ, rocksdb::WriteBatch::Count(), PrintContents(), rocksdb::WriteBatch::Put(), and rocksdb::WriteBatchInternal::SetSequence().

230  {
231  WriteBatch batch;
232  batch.Put(Slice("foo"), Slice("bar"));
233 
234  {
235  // Try a write where the key is one slice but the value is two
236  Slice key_slice("baz");
237  Slice value_slices[2] = { Slice("header"), Slice("payload") };
238  batch.Put(SliceParts(&key_slice, 1),
239  SliceParts(value_slices, 2));
240  }
241 
242  {
243  // One where the key is composite but the value is a single slice
244  Slice key_slices[3] = { Slice("key"), Slice("part2"), Slice("part3") };
245  Slice value_slice("value");
246  batch.Put(SliceParts(key_slices, 3),
247  SliceParts(&value_slice, 1));
248  }
249 
250  WriteBatchInternal::SetSequence(&batch, 100);
251  ASSERT_EQ("Put(baz, headerpayload)@101"
252  "Put(foo, bar)@100"
253  "Put(keypart2part3, value)@102",
254  PrintContents(&batch));
255  ASSERT_EQ(3, batch.Count());
256 }

Here is the call graph for this function:

rocksdb::TEST ( StringAppendOperatorTest  ,
SimpleDelimiterTest   
)

Definition at line 237 of file stringappend_test.cc.

References rocksdb::StringLists::Append(), ASSERT_EQ, db, rocksdb::StringLists::Get(), and OpenDb().

237  {
238  auto db = OpenDb('|');
239  StringLists slists(db);
240 
241  slists.Append("k1", "v1");
242  slists.Append("k1", "v2");
243  slists.Append("k1", "v3");
244 
245  std::string res;
246  slists.Get("k1", &res);
247  ASSERT_EQ(res, "v1|v2|v3");
248 }

Here is the call graph for this function:

rocksdb::TEST ( CacheTest  ,
EvictionPolicyRef2   
)

Definition at line 239 of file cache_test.cc.

References ASSERT_EQ, and EncodeKey().

239  {
240  std::vector<Cache::Handle*> handles;
241 
242  Insert(100, 101);
243  // Insert entries much more than Cache capacity
244  for (int i = 0; i < kCacheSize + 100; i++) {
245  Insert(1000 + i, 2000 + i);
246  if (i < kCacheSize ) {
247  handles.push_back(cache_->Lookup(EncodeKey(1000 + i)));
248  }
249  }
250 
 251  // Make sure referenced keys can also be deleted
 252  // if there are not enough non-referenced keys
253  for (int i = 0; i < 5; i++) {
254  ASSERT_EQ(-1, Lookup(1000 + i));
255  }
256 
257  for (int i = kCacheSize; i < kCacheSize + 100; i++) {
258  ASSERT_EQ(2000 + i, Lookup(1000 + i));
259  }
260  ASSERT_EQ(-1, Lookup(100));
261 
262  // Cleaning up all the handles
263  while (handles.size() > 0) {
264  cache_->Release(handles.back());
265  handles.pop_back();
266  }
267 }

Here is the call graph for this function:

rocksdb::TEST ( CorruptionTest  ,
TableFile   
)

Definition at line 240 of file corruption_test.cc.

References db_, kTableFile, rocksdb::DBImpl::TEST_CompactRange(), and rocksdb::DBImpl::TEST_FlushMemTable().

240  {
241  Build(100);
242  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
243  dbi->TEST_FlushMemTable();
244  dbi->TEST_CompactRange(0, nullptr, nullptr);
245  dbi->TEST_CompactRange(1, nullptr, nullptr);
246 
247  Corrupt(kTableFile, 100, 1);
248  Check(99, 99);
249 }

Here is the call graph for this function:

rocksdb::TEST ( StringAppendOperatorTest  ,
OneValueNoDelimiterTest   
)

Definition at line 250 of file stringappend_test.cc.

References rocksdb::StringLists::Append(), ASSERT_EQ, db, rocksdb::StringLists::Get(), and OpenDb().

250  {
251  auto db = OpenDb('!');
252  StringLists slists(db);
253 
254  slists.Append("random_key", "single_val");
255 
256  std::string res;
257  slists.Get("random_key", &res);
258  ASSERT_EQ(res, "single_val");
259 }

Here is the call graph for this function:

rocksdb::TEST ( CorruptionTest  ,
TableFileIndexData   
)

Definition at line 251 of file corruption_test.cc.

References db_, kTableFile, and rocksdb::DBImpl::TEST_FlushMemTable().

251  {
252  Build(10000); // Enough to build multiple Tables
253  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
254  dbi->TEST_FlushMemTable();
255 
256  Corrupt(kTableFile, -2000, 500);
257  Reopen();
258  Check(5000, 9999);
259 }

Here is the call graph for this function:

rocksdb::TEST ( DeleteFileTest  ,
DeleteLogFiles   
)

Definition at line 253 of file deletefile_test.cc.

References ASSERT_EQ, ASSERT_GT, ASSERT_OK, ASSERT_TRUE, db_, rocksdb::DBImpl::DeleteFile(), env_, rocksdb::Env::FileExists(), rocksdb::DBImpl::Flush(), rocksdb::DBImpl::GetSortedWalFiles(), kAliveLogFile, kArchivedLogFile, ripple::Resource::ok, options_, and rocksdb::Options::wal_dir.

253  {
254  AddKeys(10, 0);
255  VectorLogPtr logfiles;
256  db_->GetSortedWalFiles(logfiles);
257  ASSERT_GT(logfiles.size(), 0UL);
258  // Take the last log file which is expected to be alive and try to delete it
259  // Should not succeed because live logs are not allowed to be deleted
260  std::unique_ptr<LogFile> alive_log = std::move(logfiles.back());
261  ASSERT_EQ(alive_log->Type(), kAliveLogFile);
262  ASSERT_TRUE(env_->FileExists(options_.wal_dir + "/" + alive_log->PathName()));
263  fprintf(stdout, "Deleting alive log file %s\n",
264  alive_log->PathName().c_str());
265  ASSERT_TRUE(!db_->DeleteFile(alive_log->PathName()).ok());
266  ASSERT_TRUE(env_->FileExists(options_.wal_dir + "/" + alive_log->PathName()));
267  logfiles.clear();
268 
269  // Call Flush to bring about a new working log file and add more keys
270  // Call Flush again to flush out memtable and move alive log to archived log
271  // and try to delete the archived log file
272  FlushOptions fopts;
273  db_->Flush(fopts);
274  AddKeys(10, 0);
275  db_->Flush(fopts);
276  db_->GetSortedWalFiles(logfiles);
277  ASSERT_GT(logfiles.size(), 0UL);
278  std::unique_ptr<LogFile> archived_log = std::move(logfiles.front());
279  ASSERT_EQ(archived_log->Type(), kArchivedLogFile);
280  ASSERT_TRUE(env_->FileExists(options_.wal_dir + "/" +
281  archived_log->PathName()));
282  fprintf(stdout, "Deleting archived log file %s\n",
283  archived_log->PathName().c_str());
284  ASSERT_OK(db_->DeleteFile(archived_log->PathName()));
285  ASSERT_TRUE(!env_->FileExists(options_.wal_dir + "/" +
286  archived_log->PathName()));
287  CloseDB();
288 }

Here is the call graph for this function:

rocksdb::TEST ( RedisListsTest  ,
InsertTest   
)

Definition at line 253 of file redis_lists_test.cc.

References ASSERT_EQ, ASSERT_TRUE, rocksdb::RedisLists::Index(), rocksdb::RedisLists::InsertAfter(), rocksdb::RedisLists::InsertBefore(), rocksdb::RedisLists::Length(), and rocksdb::RedisLists::PushLeft().

253  {
254  RedisLists redis(kDefaultDbName, options, true);
255 
256  string tempv; // Used below for all Index(), PopRight(), PopLeft()
257 
258  // Insert on empty list (return 0, and do not crash)
259  ASSERT_EQ(redis.InsertBefore("k1", "non-exist", "a"), 0);
260  ASSERT_EQ(redis.InsertAfter("k1", "other-non-exist", "c"), 0);
261  ASSERT_EQ(redis.Length("k1"), 0);
262 
263  // Push some preliminary stuff [g, f, e, d, c, b, a]
264  redis.PushLeft("k1", "a");
265  redis.PushLeft("k1", "b");
266  redis.PushLeft("k1", "c");
267  redis.PushLeft("k1", "d");
268  redis.PushLeft("k1", "e");
269  redis.PushLeft("k1", "f");
270  redis.PushLeft("k1", "g");
271  ASSERT_EQ(redis.Length("k1"), 7);
272 
273  // Test InsertBefore
274  int newLength = redis.InsertBefore("k1", "e", "hello");
275  ASSERT_EQ(newLength, 8);
276  ASSERT_EQ(redis.Length("k1"), newLength);
277  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
278  ASSERT_EQ(tempv, "f");
279  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
280  ASSERT_EQ(tempv, "e");
281  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
282  ASSERT_EQ(tempv, "hello");
283 
284  // Test InsertAfter
285  newLength = redis.InsertAfter("k1", "c", "bye");
286  ASSERT_EQ(newLength, 9);
287  ASSERT_EQ(redis.Length("k1"), newLength);
288  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
289  ASSERT_EQ(tempv, "bye");
290 
291  // Test bad value on InsertBefore
292  newLength = redis.InsertBefore("k1", "yo", "x");
293  ASSERT_EQ(newLength, 9);
294  ASSERT_EQ(redis.Length("k1"), newLength);
295 
296  // Test bad value on InsertAfter
297  newLength = redis.InsertAfter("k1", "xxxx", "y");
298  ASSERT_EQ(newLength, 9);
299  ASSERT_EQ(redis.Length("k1"), newLength);
300 
301  // Test InsertBefore beginning
302  newLength = redis.InsertBefore("k1", "g", "begggggggggggggggg");
303  ASSERT_EQ(newLength, 10);
304  ASSERT_EQ(redis.Length("k1"), newLength);
305 
306  // Test InsertAfter end
307  newLength = redis.InsertAfter("k1", "a", "enddd");
308  ASSERT_EQ(newLength, 11);
309  ASSERT_EQ(redis.Length("k1"), newLength);
310 
311  // Make sure nothing weird happened.
312  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
313  ASSERT_EQ(tempv, "begggggggggggggggg");
314  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
315  ASSERT_EQ(tempv, "g");
316  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
317  ASSERT_EQ(tempv, "f");
318  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
319  ASSERT_EQ(tempv, "hello");
320  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
321  ASSERT_EQ(tempv, "e");
322  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
323  ASSERT_EQ(tempv, "d");
324  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
325  ASSERT_EQ(tempv, "c");
326  ASSERT_TRUE(redis.Index("k1", 7, &tempv));
327  ASSERT_EQ(tempv, "bye");
328  ASSERT_TRUE(redis.Index("k1", 8, &tempv));
329  ASSERT_EQ(tempv, "b");
330  ASSERT_TRUE(redis.Index("k1", 9, &tempv));
331  ASSERT_EQ(tempv, "a");
332  ASSERT_TRUE(redis.Index("k1", 10, &tempv));
333  ASSERT_EQ(tempv, "enddd");
334 }

Here is the call graph for this function:

rocksdb::TEST ( Coding  ,
BitStreamConvenienceFuncs   
)

Definition at line 259 of file coding_test.cc.

References ASSERT_EQ, BitStreamGetInt(), and BitStreamPutInt().

259  {
260  std::string bytes(1, '\0');
261 
262  // Check that independent changes to byte are preserved.
263  BitStreamPutInt(&bytes, 0, 2, 3);
264  BitStreamPutInt(&bytes, 3, 2, 3);
265  BitStreamPutInt(&bytes, 6, 2, 3);
266  ASSERT_EQ((unsigned char)bytes[0], (unsigned char)(3 + (3 << 3) + (3 << 6)));
267  ASSERT_EQ(BitStreamGetInt(&bytes, 0, 2), 3u);
268  ASSERT_EQ(BitStreamGetInt(&bytes, 3, 2), 3u);
269  ASSERT_EQ(BitStreamGetInt(&bytes, 6, 2), 3u);
270  Slice slice(bytes);
271  ASSERT_EQ(BitStreamGetInt(&slice, 0, 2), 3u);
272  ASSERT_EQ(BitStreamGetInt(&slice, 3, 2), 3u);
273  ASSERT_EQ(BitStreamGetInt(&slice, 6, 2), 3u);
274 
275  // Test overlapping crossing over byte boundaries
276  bytes = std::string(2, '\0');
277  BitStreamPutInt(&bytes, 6, 4, 15);
278  ASSERT_EQ((unsigned char)bytes[0], 3 << 6);
279  ASSERT_EQ((unsigned char)bytes[1], 3);
280  ASSERT_EQ(BitStreamGetInt(&bytes, 6, 4), 15u);
281  slice = Slice(bytes);
282  ASSERT_EQ(BitStreamGetInt(&slice, 6, 4), 15u);
283 
284  // Test 64-bit number
285  bytes = std::string(64/8, '\0');
286  BitStreamPutInt(&bytes, 0, 64, (uint64_t)(-1));
287  ASSERT_EQ(BitStreamGetInt(&bytes, 0, 64), (uint64_t)(-1));
288  slice = Slice(bytes);
289  ASSERT_EQ(BitStreamGetInt(&slice, 0, 64), (uint64_t)(-1));
290 }

Here is the call graph for this function:

rocksdb::TEST ( CorruptionTest  ,
MissingDescriptor   
)

Definition at line 261 of file corruption_test.cc.

References RepairDB().

261  {
262  Build(1000);
263  RepairDB();
264  Reopen();
265  Check(1000, 1000);
266 }

Here is the call graph for this function:

rocksdb::TEST ( StringAppendOperatorTest  ,
VariousKeys   
)

Definition at line 261 of file stringappend_test.cc.

References rocksdb::StringLists::Append(), ASSERT_EQ, ASSERT_TRUE, db, rocksdb::StringLists::Get(), and OpenDb().

261  {
262  auto db = OpenDb('\n');
263  StringLists slists(db);
264 
265  slists.Append("c", "asdasd");
266  slists.Append("a", "x");
267  slists.Append("b", "y");
268  slists.Append("a", "t");
269  slists.Append("a", "r");
270  slists.Append("b", "2");
271  slists.Append("c", "asdasd");
272 
273  std::string a, b, c;
274  bool sa, sb, sc;
275  sa = slists.Get("a", &a);
276  sb = slists.Get("b", &b);
277  sc = slists.Get("c", &c);
278 
279  ASSERT_TRUE(sa && sb && sc); // All three keys should have been found
280 
281  ASSERT_EQ(a, "x\nt\nr");
282  ASSERT_EQ(b, "y\n2");
283  ASSERT_EQ(c, "asdasd\nasdasd");
284 }

Here is the call graph for this function:

rocksdb::TEST ( CorruptionTest  ,
SequenceNumberRecovery   
)

Definition at line 268 of file corruption_test.cc.

References ASSERT_EQ, ASSERT_OK, db_, rocksdb::DBImpl::Get(), rocksdb::DBImpl::Put(), and RepairDB().

268  {
269  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
270  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
271  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v3"));
272  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v4"));
273  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v5"));
274  RepairDB();
275  Reopen();
276  std::string v;
277  ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
278  ASSERT_EQ("v5", v);
279  // Write something. If sequence number was not recovered properly,
280  // it will be hidden by an earlier write.
281  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v6"));
282  ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
283  ASSERT_EQ("v6", v);
284  Reopen();
285  ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
286  ASSERT_EQ("v6", v);
287 }

Here is the call graph for this function:

rocksdb::TEST ( CacheTest  ,
EvictionPolicyRefLargeScanLimit   
)

Definition at line 269 of file cache_test.cc.

References ASSERT_EQ, and EncodeKey().

269  {
270  std::vector<Cache::Handle*> handles2;
271 
272  // Cache2 has a cache RemoveScanCountLimit higher than cache size
273  // so it would trigger a boundary condition.
274 
275  // Populate the cache with 10 more keys than its size.
276  // Reference all keys except one close to the end.
277  for (int i = 0; i < kCacheSize2 + 10; i++) {
278  Insert2(1000 + i, 2000+i);
279  if (i != kCacheSize2 ) {
280  handles2.push_back(cache2_->Lookup(EncodeKey(1000 + i)));
281  }
282  }
283 
284  // Make sure referenced keys are also possible to be deleted
285  // if there are not sufficient non-referenced keys
286  for (int i = 0; i < 3; i++) {
287  ASSERT_EQ(-1, Lookup2(1000 + i));
288  }
289  // The non-referenced value is deleted even if it's accessed
290  // recently.
291  ASSERT_EQ(-1, Lookup2(1000 + kCacheSize2));
292  // Other values recently accessed are not deleted since they
293  // are referenced.
294  for (int i = kCacheSize2 - 10; i < kCacheSize2 + 10; i++) {
295  if (i != kCacheSize2) {
296  ASSERT_EQ(2000 + i, Lookup2(1000 + i));
297  }
298  }
299 
300  // Cleaning up all the handles
301  while (handles2.size() > 0) {
302  cache2_->Release(handles2.back());
303  handles2.pop_back();
304  }
305 }

Here is the call graph for this function:

rocksdb::TEST ( StringAppendOperatorTest  ,
RandomMixGetAppend   
)

Definition at line 287 of file stringappend_test.cc.

References rocksdb::StringLists::Append(), ASSERT_EQ, db, rocksdb::StringLists::Get(), OpenDb(), and rocksdb::Random::Uniform().

287  {
288  auto db = OpenDb(' ');
289  StringLists slists(db);
290 
291  // Generate a list of random keys and values
292  const int kWordCount = 15;
293  std::string words[] = {"sdasd", "triejf", "fnjsdfn", "dfjisdfsf", "342839",
294  "dsuha", "mabuais", "sadajsid", "jf9834hf", "2d9j89",
295  "dj9823jd", "a", "dk02ed2dh", "$(jd4h984$(*", "mabz"};
296  const int kKeyCount = 6;
297  std::string keys[] = {"dhaiusdhu", "denidw", "daisda", "keykey", "muki",
298  "shzassdianmd"};
299 
300  // Will store a local copy of all data in order to verify correctness
301  std::map<std::string, std::string> parallel_copy;
302 
303  // Generate a bunch of random queries (Append and Get)!
304  enum query_t { APPEND_OP, GET_OP, NUM_OPS };
305  Random randomGen(1337); //deterministic seed; always get same results!
306 
307  const int kNumQueries = 30;
308  for (int q=0; q<kNumQueries; ++q) {
309  // Generate a random query (Append or Get) and random parameters
310  query_t query = (query_t)randomGen.Uniform((int)NUM_OPS);
311  std::string key = keys[randomGen.Uniform((int)kKeyCount)];
312  std::string word = words[randomGen.Uniform((int)kWordCount)];
313 
314  // Apply the query and any checks.
315  if (query == APPEND_OP) {
316 
317  // Apply the rocksdb test-harness Append defined above
318  slists.Append(key, word); //apply the rocksdb append
319 
320  // Apply the similar "Append" to the parallel copy
321  if (parallel_copy[key].size() > 0) {
322  parallel_copy[key] += " " + word;
323  } else {
324  parallel_copy[key] = word;
325  }
326 
327  } else if (query == GET_OP) {
328  // Assumes that a non-existent key just returns <empty>
329  std::string res;
330  slists.Get(key, &res);
331  ASSERT_EQ(res, parallel_copy[key]);
332  }
333 
334  }
335 
336 }

Here is the call graph for this function:

rocksdb::TEST ( CorruptionTest  ,
CorruptedDescriptor   
)

Definition at line 289 of file corruption_test.cc.

References ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, db_, rocksdb::DBImpl::Get(), kDescriptorFile, rocksdb::Status::ok(), rocksdb::DBImpl::Put(), RepairDB(), and rocksdb::DBImpl::TEST_FlushMemTable().

289  {
290  ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello"));
291  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
292  dbi->TEST_FlushMemTable();
293  dbi->TEST_CompactRange(0, nullptr, nullptr);
294 
295  Corrupt(kDescriptorFile, 0, 1000);
296  Status s = TryReopen();
297  ASSERT_TRUE(!s.ok());
298 
299  RepairDB();
300  Reopen();
301  std::string v;
302  ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
303  ASSERT_EQ("hello", v);
304 }

Here is the call graph for this function:

rocksdb::TEST ( SkipTest  ,
ConcurrentWithoutThreads   
)

Definition at line 293 of file skiplist_test.cc.

References rocksdb::test::RandomSeed(), rocksdb::ConcurrentTest::ReadStep(), test, and rocksdb::ConcurrentTest::WriteStep().

293  {
294  ConcurrentTest test;
295  Random rnd(test::RandomSeed());
296  for (int i = 0; i < 10000; i++) {
297  test.ReadStep(&rnd);
298  test.WriteStep(&rnd);
299  }
300 }

Here is the call graph for this function:

rocksdb::TEST ( CorruptionTest  ,
CompactionInputError   
)

Definition at line 306 of file corruption_test.cc.

References ASSERT_EQ, db_, kTableFile, rocksdb::DBImpl::MaxMemCompactionLevel(), NumberToString(), and rocksdb::DBImpl::TEST_FlushMemTable().

306  {
307  Build(10);
308  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
309  dbi->TEST_FlushMemTable();
310  const int last = dbi->MaxMemCompactionLevel();
311  ASSERT_EQ(1, Property("rocksdb.num-files-at-level" + NumberToString(last)));
312 
313  Corrupt(kTableFile, 100, 1);
314  Check(9, 9);
315 
316  // Force compactions by writing lots of values
317  Build(10000);
318  Check(10000, 10000);
319 }

Here is the call graph for this function:

rocksdb::TEST ( CacheTest  ,
HeavyEntries   
)

Definition at line 309 of file cache_test.cc.

References ASSERT_EQ, and ASSERT_LE.

309  {
310  // Add a bunch of light and heavy entries and then count the combined
311  // size of items still in the cache, which must be approximately the
312  // same as the total capacity.
313  const int kLight = 1;
314  const int kHeavy = 10;
315  int added = 0;
316  int index = 0;
317  while (added < 2*kCacheSize) {
318  const int weight = (index & 1) ? kLight : kHeavy;
319  Insert(index, 1000+index, weight);
320  added += weight;
321  index++;
322  }
323 
324  int cached_weight = 0;
325  for (int i = 0; i < index; i++) {
326  const int weight = (i & 1 ? kLight : kHeavy);
327  int r = Lookup(i);
328  if (r >= 0) {
329  cached_weight += weight;
330  ASSERT_EQ(1000+i, r);
331  }
332  }
333  ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
334 }
rocksdb::TEST ( TtlTest  ,
NoEffect   
)

Definition at line 320 of file ttl_test.cc.

320  {
321  MakeKVMap(kSampleSize_);
322  int boundary1 = kSampleSize_ / 3;
323  int boundary2 = 2 * boundary1;
324 
325  OpenTtl();
326  PutValues(0, boundary1); //T=0: Set1 never deleted
327  SleepCompactCheck(1, 0, boundary1); //T=1: Set1 still there
328  CloseTtl();
329 
330  OpenTtl(0);
331  PutValues(boundary1, boundary2 - boundary1); //T=1: Set2 never deleted
332  SleepCompactCheck(1, 0, boundary2); //T=2: Sets1 & 2 still there
333  CloseTtl();
334 
335  OpenTtl(-1);
336  PutValues(boundary2, kSampleSize_ - boundary2); //T=3: Set3 never deleted
337  SleepCompactCheck(1, 0, kSampleSize_, true); //T=4: Sets 1,2,3 still there
338  CloseTtl();
339 }
rocksdb::TEST ( CorruptionTest  ,
CompactionInputErrorParanoid   
)

Definition at line 321 of file corruption_test.cc.

References ASSERT_EQ, ASSERT_TRUE, db_, kTableFile, rocksdb::DBImpl::NumberLevels(), rocksdb::Status::ok(), rocksdb::Options::paranoid_checks, rocksdb::DBImpl::Put(), rocksdb::DBImpl::TEST_FlushMemTable(), rocksdb::DBImpl::TEST_WaitForCompact(), rocksdb::crc32c::Value(), and rocksdb::Options::write_buffer_size.

321  {
322  Options options;
323  options.paranoid_checks = true;
324  options.write_buffer_size = 1048576;
325  Reopen(&options);
326  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
327 
328  // Fill levels >= 1 so memtable compaction outputs to level 1
329  for (int level = 1; level < dbi->NumberLevels(); level++) {
330  dbi->Put(WriteOptions(), "", "begin");
331  dbi->Put(WriteOptions(), "~", "end");
332  dbi->TEST_FlushMemTable();
333  }
334 
335  Build(10);
336  dbi->TEST_FlushMemTable();
337  dbi->TEST_WaitForCompact();
338  ASSERT_EQ(1, Property("rocksdb.num-files-at-level0"));
339 
340  Corrupt(kTableFile, 100, 1);
341  Check(9, 9);
342 
343  // Write must eventually fail because of corrupted table
344  Status s;
345  std::string tmp1, tmp2;
346  bool failed = false;
347  for (int i = 0; i < 10000 && s.ok(); i++) {
348  s = db_->Put(WriteOptions(), Key(i, &tmp1), Value(i, &tmp2));
349  if (!s.ok()) {
350  failed = true;
351  }
352  // if one write failed, every subsequent write must fail, too
353  ASSERT_TRUE(!failed || !s.ok()) << "write did not fail in a corrupted db";
354  }
355  ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db";
356 }

Here is the call graph for this function:

rocksdb::TEST ( CacheTest  ,
NewId   
)

Definition at line 336 of file cache_test.cc.

References ASSERT_NE.

336  {
337  uint64_t a = cache_->NewId();
338  uint64_t b = cache_->NewId();
339  ASSERT_NE(a, b);
340 }
rocksdb::TEST ( RedisListsTest  ,
SetTest   
)

Definition at line 337 of file redis_lists_test.cc.

References ASSERT_EQ, ASSERT_TRUE, rocksdb::RedisLists::Index(), rocksdb::RedisLists::Length(), rocksdb::RedisLists::PushLeft(), and rocksdb::RedisLists::Set().

337  {
338  RedisLists redis(kDefaultDbName, options, true);
339 
340  string tempv; // Used below for all Index(), PopRight(), PopLeft()
341 
342  // Set on empty list (return false, and do not crash)
343  ASSERT_EQ(redis.Set("k1", 7, "a"), false);
344  ASSERT_EQ(redis.Set("k1", 0, "a"), false);
345  ASSERT_EQ(redis.Set("k1", -49, "cx"), false);
346  ASSERT_EQ(redis.Length("k1"), 0);
347 
348  // Push some preliminary stuff [g, f, e, d, c, b, a]
349  redis.PushLeft("k1", "a");
350  redis.PushLeft("k1", "b");
351  redis.PushLeft("k1", "c");
352  redis.PushLeft("k1", "d");
353  redis.PushLeft("k1", "e");
354  redis.PushLeft("k1", "f");
355  redis.PushLeft("k1", "g");
356  ASSERT_EQ(redis.Length("k1"), 7);
357 
358  // Test Regular Set
359  ASSERT_TRUE(redis.Set("k1", 0, "0"));
360  ASSERT_TRUE(redis.Set("k1", 3, "3"));
361  ASSERT_TRUE(redis.Set("k1", 6, "6"));
362  ASSERT_TRUE(redis.Set("k1", 2, "2"));
363  ASSERT_TRUE(redis.Set("k1", 5, "5"));
364  ASSERT_TRUE(redis.Set("k1", 1, "1"));
365  ASSERT_TRUE(redis.Set("k1", 4, "4"));
366 
367  ASSERT_EQ(redis.Length("k1"), 7); // Size should not change
368  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
369  ASSERT_EQ(tempv, "0");
370  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
371  ASSERT_EQ(tempv, "1");
372  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
373  ASSERT_EQ(tempv, "2");
374  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
375  ASSERT_EQ(tempv, "3");
376  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
377  ASSERT_EQ(tempv, "4");
378  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
379  ASSERT_EQ(tempv, "5");
380  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
381  ASSERT_EQ(tempv, "6");
382 
383  // Set with negative indices
384  ASSERT_TRUE(redis.Set("k1", -7, "a"));
385  ASSERT_TRUE(redis.Set("k1", -4, "d"));
386  ASSERT_TRUE(redis.Set("k1", -1, "g"));
387  ASSERT_TRUE(redis.Set("k1", -5, "c"));
388  ASSERT_TRUE(redis.Set("k1", -2, "f"));
389  ASSERT_TRUE(redis.Set("k1", -6, "b"));
390  ASSERT_TRUE(redis.Set("k1", -3, "e"));
391 
392  ASSERT_EQ(redis.Length("k1"), 7); // Size should not change
393  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
394  ASSERT_EQ(tempv, "a");
395  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
396  ASSERT_EQ(tempv, "b");
397  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
398  ASSERT_EQ(tempv, "c");
399  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
400  ASSERT_EQ(tempv, "d");
401  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
402  ASSERT_EQ(tempv, "e");
403  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
404  ASSERT_EQ(tempv, "f");
405  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
406  ASSERT_EQ(tempv, "g");
407 
408  // Bad indices (just out-of-bounds / off-by-one check)
409  ASSERT_EQ(redis.Set("k1", -8, "off-by-one in negative index"), false);
410  ASSERT_EQ(redis.Set("k1", 7, "off-by-one-error in positive index"), false);
411  ASSERT_EQ(redis.Set("k1", 43892, "big random index should fail"), false);
412  ASSERT_EQ(redis.Set("k1", -21391, "large negative index should fail"), false);
413 
414  // One last check (to make sure nothing weird happened)
415  ASSERT_EQ(redis.Length("k1"), 7); // Size should not change
416  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
417  ASSERT_EQ(tempv, "a");
418  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
419  ASSERT_EQ(tempv, "b");
420  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
421  ASSERT_EQ(tempv, "c");
422  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
423  ASSERT_EQ(tempv, "d");
424  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
425  ASSERT_EQ(tempv, "e");
426  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
427  ASSERT_EQ(tempv, "f");
428  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
429  ASSERT_EQ(tempv, "g");
430 }

Here is the call graph for this function:

rocksdb::TEST ( StringAppendOperatorTest  ,
BIGRandomMixGetAppend   
)

Definition at line 338 of file stringappend_test.cc.

References rocksdb::StringLists::Append(), ASSERT_EQ, db, rocksdb::StringLists::Get(), OpenDb(), and rocksdb::Random::Uniform().

338  {
339  auto db = OpenDb(' ');
340  StringLists slists(db);
341 
342  // Generate a list of random keys and values
343  const int kWordCount = 15;
344  std::string words[] = {"sdasd", "triejf", "fnjsdfn", "dfjisdfsf", "342839",
345  "dsuha", "mabuais", "sadajsid", "jf9834hf", "2d9j89",
346  "dj9823jd", "a", "dk02ed2dh", "$(jd4h984$(*", "mabz"};
347  const int kKeyCount = 6;
348  std::string keys[] = {"dhaiusdhu", "denidw", "daisda", "keykey", "muki",
349  "shzassdianmd"};
350 
351  // Will store a local copy of all data in order to verify correctness
352  std::map<std::string, std::string> parallel_copy;
353 
354  // Generate a bunch of random queries (Append and Get)!
355  enum query_t { APPEND_OP, GET_OP, NUM_OPS };
356  Random randomGen(9138204); // deterministic seed
357 
358  const int kNumQueries = 1000;
359  for (int q=0; q<kNumQueries; ++q) {
360  // Generate a random query (Append or Get) and random parameters
361  query_t query = (query_t)randomGen.Uniform((int)NUM_OPS);
362  std::string key = keys[randomGen.Uniform((int)kKeyCount)];
363  std::string word = words[randomGen.Uniform((int)kWordCount)];
364 
365  //Apply the query and any checks.
366  if (query == APPEND_OP) {
367 
368  // Apply the rocksdb test-harness Append defined above
369  slists.Append(key, word); //apply the rocksdb append
370 
371  // Apply the similar "Append" to the parallel copy
372  if (parallel_copy[key].size() > 0) {
373  parallel_copy[key] += " " + word;
374  } else {
375  parallel_copy[key] = word;
376  }
377 
378  } else if (query == GET_OP) {
379  // Assumes that a non-existent key just returns <empty>
380  std::string res;
381  slists.Get(key, &res);
382  ASSERT_EQ(res, parallel_copy[key]);
383  }
384 
385  }
386 
387 }

Here is the call graph for this function:

rocksdb::TEST ( TtlTest  ,
PresentDuringTTL   
)

Definition at line 342 of file ttl_test.cc.

342  {
343  MakeKVMap(kSampleSize_);
344 
345  OpenTtl(2); // T=0:Open the db with ttl = 2
346  PutValues(0, kSampleSize_); // T=0:Insert Set1. Delete at t=2
347  SleepCompactCheck(1, 0, kSampleSize_, true); // T=1:Set1 should still be there
348  CloseTtl();
349 }
rocksdb::TEST ( TtlTest  ,
AbsentAfterTTL   
)

Definition at line 352 of file ttl_test.cc.

352  {
353  MakeKVMap(kSampleSize_);
354 
355  OpenTtl(1); // T=0:Open the db with ttl = 1
356  PutValues(0, kSampleSize_); // T=0:Insert Set1. Delete at t=1
357  SleepCompactCheck(2, 0, kSampleSize_, false); // T=2:Set1 should not be there
358  CloseTtl();
359 }
rocksdb::TEST ( CacheTest  ,
BadEviction   
)

Definition at line 357 of file cache_test.cc.

References ASSERT_TRUE, deleter(), NewLRUCache(), beast::IP::to_string(), and rocksdb::crc32c::Value().

357  {
358  int n = 10;
359 
360  // a LRUCache with n entries and one shard only
361  std::shared_ptr<Cache> cache = NewLRUCache(n, 0);
362 
363  std::vector<Cache::Handle*> handles(n+1);
364 
365  // Insert n+1 entries, but not releasing.
366  for (int i = 0; i < n+1; i++) {
367  std::string key = std::to_string(i+1);
368  handles[i] = cache->Insert(key, new Value(i+1), 1, &deleter);
369  }
370 
371  // Guess what's in the cache now?
372  for (int i = 0; i < n+1; i++) {
373  std::string key = std::to_string(i+1);
374  auto h = cache->Lookup(key);
375  std::cout << key << (h?" found\n":" not found\n");
376  // Only the first entry should be missing
377  ASSERT_TRUE(h || i == 0);
378  if (h) cache->Release(h);
379  }
380 
381  for (int i = 0; i < n+1; i++) {
382  cache->Release(handles[i]);
383  }
384  std::cout << "Poor entries\n";
385 }

Here is the call graph for this function:

rocksdb::TEST ( CorruptionTest  ,
UnrelatedKeys   
)

Definition at line 358 of file corruption_test.cc.

References ASSERT_EQ, ASSERT_OK, db_, rocksdb::DBImpl::Get(), kTableFile, rocksdb::DBImpl::Put(), rocksdb::DBImpl::TEST_FlushMemTable(), and rocksdb::crc32c::Value().

358  {
359  Build(10);
360  DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
361  dbi->TEST_FlushMemTable();
362  Corrupt(kTableFile, 100, 1);
363 
364  std::string tmp1, tmp2;
365  ASSERT_OK(db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2)));
366  std::string v;
367  ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
368  ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
369  dbi->TEST_FlushMemTable();
370  ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
371  ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
372 }

Here is the call graph for this function:

rocksdb::TEST ( TtlTest  ,
ResetTimestamp   
)

Definition at line 363 of file ttl_test.cc.

363  {
364  MakeKVMap(kSampleSize_);
365 
366  OpenTtl(3);
367  PutValues(0, kSampleSize_); // T=0: Insert Set1. Delete at t=3
368  sleep(2); // T=2
369  PutValues(0, kSampleSize_); // T=2: Insert Set1. Delete at t=5
370  SleepCompactCheck(2, 0, kSampleSize_); // T=4: Set1 should still be there
371  CloseTtl();
372 }
rocksdb::TEST ( EnvPosixTest  ,
PosixRandomRWFileTest   
)

Definition at line 371 of file env_test.cc.

References rocksdb::RandomRWFile::Allocate(), ASSERT_EQ, ASSERT_OK, rocksdb::RandomRWFile::Close(), rocksdb::Slice::compare(), env_, rocksdb::RandomRWFile::Fsync(), rocksdb::Env::NewRandomRWFile(), rocksdb::RandomRWFile::Read(), rocksdb::RandomRWFile::Sync(), rocksdb::test::TmpDir(), rocksdb::EnvOptions::use_mmap_reads, rocksdb::EnvOptions::use_mmap_writes, and rocksdb::RandomRWFile::Write().

371  {
372  EnvOptions soptions;
373  soptions.use_mmap_writes = soptions.use_mmap_reads = false;
374  std::string fname = test::TmpDir() + "/" + "testfile";
375 
376  unique_ptr<RandomRWFile> file;
377  ASSERT_OK(env_->NewRandomRWFile(fname, &file, soptions));
378  // If you run the unit test on tmpfs, then tmpfs might not
379  // support fallocate. It is still better to trigger that
380  // code-path instead of eliminating it completely.
381  file.get()->Allocate(0, 10*1024*1024);
382  ASSERT_OK(file.get()->Write(100, Slice("Hello world")));
383  ASSERT_OK(file.get()->Write(105, Slice("Hello world")));
384  ASSERT_OK(file.get()->Sync());
385  ASSERT_OK(file.get()->Fsync());
386  char scratch[100];
387  Slice result;
388  ASSERT_OK(file.get()->Read(100, 16, &result, scratch));
389  ASSERT_EQ(result.compare("HelloHello world"), 0);
390  ASSERT_OK(file.get()->Close());
391 }

Here is the call graph for this function:

rocksdb::TEST ( SkipTest  ,
Concurrent1   
)

Definition at line 373 of file skiplist_test.cc.

References RunConcurrent().

373 { RunConcurrent(1); }

Here is the call graph for this function:

rocksdb::TEST ( SkipTest  ,
Concurrent2   
)

Definition at line 374 of file skiplist_test.cc.

References RunConcurrent().

374 { RunConcurrent(2); }

Here is the call graph for this function:

rocksdb::TEST ( TtlTest  ,
IterPresentDuringTTL   
)

Definition at line 375 of file ttl_test.cc.

375  {
376  MakeKVMap(kSampleSize_);
377 
378  OpenTtl(2);
379  PutValues(0, kSampleSize_); // T=0: Insert. Delete at t=2
380  SleepCompactCheckIter(1, 0, kSampleSize_); // T=1: Set should be there
381  CloseTtl();
382 }
rocksdb::TEST ( SkipTest  ,
Concurrent3   
)

Definition at line 375 of file skiplist_test.cc.

References RunConcurrent().

375 { RunConcurrent(3); }

Here is the call graph for this function:

rocksdb::TEST ( SkipTest  ,
Concurrent4   
)

Definition at line 376 of file skiplist_test.cc.

References RunConcurrent().

376 { RunConcurrent(4); }

Here is the call graph for this function:

rocksdb::TEST ( SkipTest  ,
Concurrent5   
)

Definition at line 377 of file skiplist_test.cc.

References RunConcurrent().

377 { RunConcurrent(5); }

Here is the call graph for this function:

rocksdb::TEST ( TtlTest  ,
IterAbsentAfterTTL   
)

Definition at line 385 of file ttl_test.cc.

385  {
386  MakeKVMap(kSampleSize_);
387 
388  OpenTtl(1);
389  PutValues(0, kSampleSize_); // T=0: Insert. Delete at t=1
390  SleepCompactCheckIter(2, 0, kSampleSize_, false); // T=2: Should not be there
391  CloseTtl();
392 }
rocksdb::TEST ( StringAppendOperatorTest  ,
PersistentVariousKeys   
)

Definition at line 390 of file stringappend_test.cc.

References rocksdb::StringLists::Append(), ASSERT_EQ, db, rocksdb::StringLists::Get(), and OpenDb().

390  {
391  // Perform the following operations in limited scope
392  {
393  auto db = OpenDb('\n');
394  StringLists slists(db);
395 
396  slists.Append("c", "asdasd");
397  slists.Append("a", "x");
398  slists.Append("b", "y");
399  slists.Append("a", "t");
400  slists.Append("a", "r");
401  slists.Append("b", "2");
402  slists.Append("c", "asdasd");
403 
404  std::string a, b, c;
405  slists.Get("a", &a);
406  slists.Get("b", &b);
407  slists.Get("c", &c);
408 
409  ASSERT_EQ(a, "x\nt\nr");
410  ASSERT_EQ(b, "y\n2");
411  ASSERT_EQ(c, "asdasd\nasdasd");
412  }
413 
414  // Reopen the database (the previous changes should persist / be remembered)
415  {
416  auto db = OpenDb('\n');
417  StringLists slists(db);
418 
419  slists.Append("c", "bbnagnagsx");
420  slists.Append("a", "sa");
421  slists.Append("b", "df");
422  slists.Append("a", "gh");
423  slists.Append("a", "jk");
424  slists.Append("b", "l;");
425  slists.Append("c", "rogosh");
426 
427  // The previous changes should be on disk (L0)
428  // The most recent changes should be in memory (MemTable)
429  // Hence, this will test both Get() paths.
430  std::string a, b, c;
431  slists.Get("a", &a);
432  slists.Get("b", &b);
433  slists.Get("c", &c);
434 
435  ASSERT_EQ(a, "x\nt\nr\nsa\ngh\njk");
436  ASSERT_EQ(b, "y\n2\ndf\nl;");
437  ASSERT_EQ(c, "asdasd\nasdasd\nbbnagnagsx\nrogosh");
438  }
439 
440  // Reopen the database (the previous changes should persist / be remembered)
441  {
442  auto db = OpenDb('\n');
443  StringLists slists(db);
444 
445  // All changes should be on disk. This will test VersionSet Get()
446  std::string a, b, c;
447  slists.Get("a", &a);
448  slists.Get("b", &b);
449  slists.Get("c", &c);
450 
451  ASSERT_EQ(a, "x\nt\nr\nsa\ngh\njk");
452  ASSERT_EQ(b, "y\n2\ndf\nl;");
453  ASSERT_EQ(c, "asdasd\nasdasd\nbbnagnagsx\nrogosh");
454  }
455 }

Here is the call graph for this function:

rocksdb::TEST ( TtlTest  ,
MultiOpenSamePresent   
)

Definition at line 396 of file ttl_test.cc.

396  {
397  MakeKVMap(kSampleSize_);
398 
399  OpenTtl(2);
400  PutValues(0, kSampleSize_); // T=0: Insert. Delete at t=2
401  CloseTtl();
402 
403  OpenTtl(2); // T=0. Delete at t=2
404  SleepCompactCheck(1, 0, kSampleSize_); // T=1: Set should be there
405  CloseTtl();
406 }
rocksdb::TEST ( TtlTest  ,
MultiOpenSameAbsent   
)

Definition at line 410 of file ttl_test.cc.

410  {
411  MakeKVMap(kSampleSize_);
412 
413  OpenTtl(1);
414  PutValues(0, kSampleSize_); // T=0: Insert. Delete at t=1
415  CloseTtl();
416 
417  OpenTtl(1); // T=0. Delete at t=1
418  SleepCompactCheck(2, 0, kSampleSize_, false); // T=2: Set should not be there
419  CloseTtl();
420 }
rocksdb::TEST ( TtlTest  ,
MultiOpenDifferent   
)

Definition at line 423 of file ttl_test.cc.

423  {
424  MakeKVMap(kSampleSize_);
425 
426  OpenTtl(1);
427  PutValues(0, kSampleSize_); // T=0: Insert. Delete at t=1
428  CloseTtl();
429 
430  OpenTtl(3); // T=0: Set deleted at t=3
431  SleepCompactCheck(2, 0, kSampleSize_); // T=2: Set should be there
432  CloseTtl();
433 }
rocksdb::TEST ( RedisListsTest  ,
InsertPushSetTest   
)

Definition at line 433 of file redis_lists_test.cc.

References ASSERT_EQ, ASSERT_TRUE, rocksdb::RedisLists::Index(), rocksdb::RedisLists::InsertAfter(), rocksdb::RedisLists::InsertBefore(), rocksdb::RedisLists::Length(), rocksdb::RedisLists::PushLeft(), rocksdb::RedisLists::PushRight(), rocksdb::RedisLists::Range(), and rocksdb::RedisLists::Set().

433  {
434  RedisLists redis(kDefaultDbName, options, true); // Destructive
435 
436  string tempv; // Used below for all Index(), PopRight(), PopLeft()
437 
438  // A series of pushes and insertions
439  // Will result in [newbegin, z, a, aftera, x, newend]
440  // Also, check the return value sometimes (should return length)
441  int lengthCheck;
442  lengthCheck = redis.PushLeft("k1", "a");
443  ASSERT_EQ(lengthCheck, 1);
444  redis.PushLeft("k1", "z");
445  redis.PushRight("k1", "x");
446  lengthCheck = redis.InsertAfter("k1", "a", "aftera");
447  ASSERT_EQ(lengthCheck , 4);
448  redis.InsertBefore("k1", "z", "newbegin"); // InsertBefore beginning of list
449  redis.InsertAfter("k1", "x", "newend"); // InsertAfter end of list
450 
451  // Check
452  std::vector<std::string> res = redis.Range("k1", 0, -1); // Get the list
453  ASSERT_EQ((int)res.size(), 6);
454  ASSERT_EQ(res[0], "newbegin");
455  ASSERT_EQ(res[5], "newend");
456  ASSERT_EQ(res[3], "aftera");
457 
458  // Testing duplicate values/pivots (multiple occurrences of 'a')
459  ASSERT_TRUE(redis.Set("k1", 0, "a")); // [a, z, a, aftera, x, newend]
460  redis.InsertAfter("k1", "a", "happy"); // [a, happy, z, a, aftera, ...]
461  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
462  ASSERT_EQ(tempv, "happy");
463  redis.InsertBefore("k1", "a", "sad"); // [sad, a, happy, z, a, aftera, ...]
464  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
465  ASSERT_EQ(tempv, "sad");
466  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
467  ASSERT_EQ(tempv, "happy");
468  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
469  ASSERT_EQ(tempv, "aftera");
470  redis.InsertAfter("k1", "a", "zz"); // [sad, a, zz, happy, z, a, aftera, ...]
471  ASSERT_TRUE(redis.Index("k1", 2, &tempv));
472  ASSERT_EQ(tempv, "zz");
473  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
474  ASSERT_EQ(tempv, "aftera");
475  ASSERT_TRUE(redis.Set("k1", 1, "nota")); // [sad, nota, zz, happy, z, a, ...]
476  redis.InsertBefore("k1", "a", "ba"); // [sad, nota, zz, happy, z, ba, a, ...]
477  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
478  ASSERT_EQ(tempv, "z");
479  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
480  ASSERT_EQ(tempv, "ba");
481  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
482  ASSERT_EQ(tempv, "a");
483 
484  // We currently have: [sad, nota, zz, happy, z, ba, a, aftera, x, newend]
485  // redis.Print("k1"); // manually check
486 
487  // Test Inserting before/after non-existent values
488  lengthCheck = redis.Length("k1"); // Ensure that the length doesn't change
489  ASSERT_EQ(lengthCheck, 10);
490  ASSERT_EQ(redis.InsertBefore("k1", "non-exist", "randval"), lengthCheck);
491  ASSERT_EQ(redis.InsertAfter("k1", "nothing", "a"), lengthCheck);
492  ASSERT_EQ(redis.InsertAfter("randKey", "randVal", "ranValue"), 0); // Empty
493  ASSERT_EQ(redis.Length("k1"), lengthCheck); // The length should not change
494 
495  // Simply Test the Set() function
496  redis.Set("k1", 5, "ba2");
497  redis.InsertBefore("k1", "ba2", "beforeba2");
498  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
499  ASSERT_EQ(tempv, "z");
500  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
501  ASSERT_EQ(tempv, "beforeba2");
502  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
503  ASSERT_EQ(tempv, "ba2");
504  ASSERT_TRUE(redis.Index("k1", 7, &tempv));
505  ASSERT_EQ(tempv, "a");
506 
507  // We have: [sad, nota, zz, happy, z, beforeba2, ba2, a, aftera, x, newend]
508 
509  // Set() with negative indices
510  redis.Set("k1", -1, "endprank");
511  ASSERT_TRUE(!redis.Index("k1", 11, &tempv));
512  ASSERT_TRUE(redis.Index("k1", 10, &tempv));
513  ASSERT_EQ(tempv, "endprank"); // Ensure Set worked correctly
514  redis.Set("k1", -11, "t");
515  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
516  ASSERT_EQ(tempv, "t");
517 
518  // Test out of bounds Set
519  ASSERT_EQ(redis.Set("k1", -12, "ssd"), false);
520  ASSERT_EQ(redis.Set("k1", 11, "sasd"), false);
521  ASSERT_EQ(redis.Set("k1", 1200, "big"), false);
522 }

Here is the call graph for this function:

rocksdb::TEST ( TtlTest  ,
ReadOnlyPresentForever   
)

Definition at line 436 of file ttl_test.cc.

436  {
437  MakeKVMap(kSampleSize_);
438 
439  OpenTtl(1); // T=0:Open the db normally
440  PutValues(0, kSampleSize_); // T=0:Insert Set1. Delete at t=1
441  CloseTtl();
442 
443  OpenReadOnlyTtl(1);
444  SleepCompactCheck(2, 0, kSampleSize_); // T=2:Set1 should still be there
445  CloseTtl();
446 }
rocksdb::TEST ( TtlTest  ,
WriteBatchTest   
)

Definition at line 450 of file ttl_test.cc.

450  {
451  MakeKVMap(kSampleSize_);
452  BatchOperation batch_ops[kSampleSize_];
453  for (int i = 0; i < kSampleSize_; i++) {
454  batch_ops[i] = PUT;
455  }
456 
457  OpenTtl(2);
458  MakePutWriteBatch(batch_ops, kSampleSize_);
459  for (int i = 0; i < kSampleSize_ / 2; i++) {
460  batch_ops[i] = DELETE;
461  }
462  MakePutWriteBatch(batch_ops, kSampleSize_ / 2);
463  SleepCompactCheck(0, 0, kSampleSize_ / 2, false);
464  SleepCompactCheck(0, kSampleSize_ / 2, kSampleSize_ - kSampleSize_ / 2);
465  CloseTtl();
466 }
rocksdb::TEST ( StringAppendOperatorTest  ,
PersistentFlushAndCompaction   
)

Definition at line 457 of file stringappend_test.cc.

References rocksdb::StringLists::Append(), ASSERT_EQ, ASSERT_TRUE, rocksdb::DBImpl::CompactRange(), db, rocksdb::DBImpl::Flush(), rocksdb::StringLists::Get(), and OpenDb().

457  {
458  // Perform the following operations in limited scope
459  {
460  auto db = OpenDb('\n');
461  StringLists slists(db);
462  std::string a, b, c;
463  bool success;
464 
465  // Append, Flush, Get
466  slists.Append("c", "asdasd");
468  success = slists.Get("c", &c);
469  ASSERT_TRUE(success);
470  ASSERT_EQ(c, "asdasd");
471 
472  // Append, Flush, Append, Get
473  slists.Append("a", "x");
474  slists.Append("b", "y");
476  slists.Append("a", "t");
477  slists.Append("a", "r");
478  slists.Append("b", "2");
479 
480  success = slists.Get("a", &a);
481  assert(success == true);
482  ASSERT_EQ(a, "x\nt\nr");
483 
484  success = slists.Get("b", &b);
485  assert(success == true);
486  ASSERT_EQ(b, "y\n2");
487 
488  // Append, Get
489  success = slists.Append("c", "asdasd");
490  assert(success);
491  success = slists.Append("b", "monkey");
492  assert(success);
493 
494  // I omit the "assert(success)" checks here.
495  slists.Get("a", &a);
496  slists.Get("b", &b);
497  slists.Get("c", &c);
498 
499  ASSERT_EQ(a, "x\nt\nr");
500  ASSERT_EQ(b, "y\n2\nmonkey");
501  ASSERT_EQ(c, "asdasd\nasdasd");
502  }
503 
504  // Reopen the database (the previous changes should persist / be remembered)
505  {
506  auto db = OpenDb('\n');
507  StringLists slists(db);
508  std::string a, b, c;
509 
510  // Get (Quick check for persistence of previous database)
511  slists.Get("a", &a);
512  ASSERT_EQ(a, "x\nt\nr");
513 
514  //Append, Compact, Get
515  slists.Append("c", "bbnagnagsx");
516  slists.Append("a", "sa");
517  slists.Append("b", "df");
518  db->CompactRange(nullptr, nullptr);
519  slists.Get("a", &a);
520  slists.Get("b", &b);
521  slists.Get("c", &c);
522  ASSERT_EQ(a, "x\nt\nr\nsa");
523  ASSERT_EQ(b, "y\n2\nmonkey\ndf");
524  ASSERT_EQ(c, "asdasd\nasdasd\nbbnagnagsx");
525 
526  // Append, Get
527  slists.Append("a", "gh");
528  slists.Append("a", "jk");
529  slists.Append("b", "l;");
530  slists.Append("c", "rogosh");
531  slists.Get("a", &a);
532  slists.Get("b", &b);
533  slists.Get("c", &c);
534  ASSERT_EQ(a, "x\nt\nr\nsa\ngh\njk");
535  ASSERT_EQ(b, "y\n2\nmonkey\ndf\nl;");
536  ASSERT_EQ(c, "asdasd\nasdasd\nbbnagnagsx\nrogosh");
537 
538  // Compact, Get
539  db->CompactRange(nullptr, nullptr);
540  ASSERT_EQ(a, "x\nt\nr\nsa\ngh\njk");
541  ASSERT_EQ(b, "y\n2\nmonkey\ndf\nl;");
542  ASSERT_EQ(c, "asdasd\nasdasd\nbbnagnagsx\nrogosh");
543 
544  // Append, Flush, Compact, Get
545  slists.Append("b", "afcg");
547  db->CompactRange(nullptr, nullptr);
548  slists.Get("b", &b);
549  ASSERT_EQ(b, "y\n2\nmonkey\ndf\nl;\nafcg");
550  }
551 }

Here is the call graph for this function:

rocksdb::TEST ( TtlTest  ,
CompactionFilter   
)

Definition at line 469 of file ttl_test.cc.

469  {
470  MakeKVMap(kSampleSize_);
471 
472  OpenTtlWithTestCompaction(1);
473  PutValues(0, kSampleSize_); // T=0:Insert Set1. Delete at t=1
474  // T=2: TTL logic takes precedence over TestFilter:-Set1 should not be there
475  SleepCompactCheck(2, 0, kSampleSize_, false);
476  CloseTtl();
477 
478  OpenTtlWithTestCompaction(3);
479  PutValues(0, kSampleSize_); // T=0:Insert Set1.
480  int partition = kSampleSize_ / 3;
481  SleepCompactCheck(1, 0, partition, false); // Part dropped
482  SleepCompactCheck(0, partition, partition); // Part kept
483  SleepCompactCheck(0, 2 * partition, partition, true, true); // Part changed
484  CloseTtl();
485 }
rocksdb::TEST ( TtlTest  ,
KeyMayExist   
)

Definition at line 489 of file ttl_test.cc.

489  {
490  MakeKVMap(kSampleSize_);
491 
492  OpenTtl();
493  PutValues(0, kSampleSize_, false);
494 
495  SimpleKeyMayExistCheck();
496 
497  CloseTtl();
498 }
rocksdb::TEST ( RedisListsTest  ,
TrimPopTest   
)

Definition at line 525 of file redis_lists_test.cc.

References ASSERT_EQ, ASSERT_TRUE, rocksdb::RedisLists::Index(), rocksdb::RedisLists::InsertAfter(), rocksdb::RedisLists::InsertBefore(), rocksdb::RedisLists::Length(), rocksdb::RedisLists::PopLeft(), rocksdb::RedisLists::PopRight(), rocksdb::RedisLists::PushLeft(), rocksdb::RedisLists::PushRight(), and rocksdb::RedisLists::Trim().

525  {
526  RedisLists redis(kDefaultDbName, options, true); // Destructive
527 
528  string tempv; // Used below for all Index(), PopRight(), PopLeft()
529 
530  // A series of pushes and insertions
531  // Will result in [newbegin, z, a, aftera, x, newend]
532  redis.PushLeft("k1", "a");
533  redis.PushLeft("k1", "z");
534  redis.PushRight("k1", "x");
535  redis.InsertBefore("k1", "z", "newbegin"); // InsertBefore start of list
536  redis.InsertAfter("k1", "x", "newend"); // InsertAfter end of list
537  redis.InsertAfter("k1", "a", "aftera");
538 
539  // Simple PopLeft/Right test
540  ASSERT_TRUE(redis.PopLeft("k1", &tempv));
541  ASSERT_EQ(tempv, "newbegin");
542  ASSERT_EQ(redis.Length("k1"), 5);
543  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
544  ASSERT_EQ(tempv, "z");
545  ASSERT_TRUE(redis.PopRight("k1", &tempv));
546  ASSERT_EQ(tempv, "newend");
547  ASSERT_EQ(redis.Length("k1"), 4);
548  ASSERT_TRUE(redis.Index("k1", -1, &tempv));
549  ASSERT_EQ(tempv, "x");
550 
551  // Now have: [z, a, aftera, x]
552 
553  // Test Trim
554  ASSERT_TRUE(redis.Trim("k1", 0, -1)); // [z, a, aftera, x] (do nothing)
555  ASSERT_EQ(redis.Length("k1"), 4);
556  ASSERT_TRUE(redis.Trim("k1", 0, 2)); // [z, a, aftera]
557  ASSERT_EQ(redis.Length("k1"), 3);
558  ASSERT_TRUE(redis.Index("k1", -1, &tempv));
559  ASSERT_EQ(tempv, "aftera");
560  ASSERT_TRUE(redis.Trim("k1", 1, 1)); // [a]
561  ASSERT_EQ(redis.Length("k1"), 1);
562  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
563  ASSERT_EQ(tempv, "a");
564 
565  // Test out of bounds (empty) trim
566  ASSERT_TRUE(redis.Trim("k1", 1, 0));
567  ASSERT_EQ(redis.Length("k1"), 0);
568 
569  // Popping with empty list (return empty without error)
570  ASSERT_TRUE(!redis.PopLeft("k1", &tempv));
571  ASSERT_TRUE(!redis.PopRight("k1", &tempv));
572  ASSERT_TRUE(redis.Trim("k1", 0, 5));
573 
574  // Exhaustive Trim test (negative and invalid indices)
575  // Will start in [newbegin, z, a, aftera, x, newend]
576  redis.PushLeft("k1", "a");
577  redis.PushLeft("k1", "z");
578  redis.PushRight("k1", "x");
579  redis.InsertBefore("k1", "z", "newbegin"); // InsertBefore start of list
580  redis.InsertAfter("k1", "x", "newend"); // InsertAfter end of list
581  redis.InsertAfter("k1", "a", "aftera");
582  ASSERT_TRUE(redis.Trim("k1", -6, -1)); // Should do nothing
583  ASSERT_EQ(redis.Length("k1"), 6);
584  ASSERT_TRUE(redis.Trim("k1", 1, -2));
585  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
586  ASSERT_EQ(tempv, "z");
587  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
588  ASSERT_EQ(tempv, "x");
589  ASSERT_EQ(redis.Length("k1"), 4);
590  ASSERT_TRUE(redis.Trim("k1", -3, -2));
591  ASSERT_EQ(redis.Length("k1"), 2);
592 }

Here is the call graph for this function:

rocksdb::TEST ( StringAppendOperatorTest  ,
SimpleTestNullDelimiter   
)

Definition at line 553 of file stringappend_test.cc.

References rocksdb::StringLists::Append(), ASSERT_EQ, ASSERT_TRUE, db, rocksdb::StringLists::Get(), and OpenDb().

553  {
554  auto db = OpenDb('\0');
555  StringLists slists(db);
556 
557  slists.Append("k1", "v1");
558  slists.Append("k1", "v2");
559  slists.Append("k1", "v3");
560 
561  std::string res;
562  bool status = slists.Get("k1", &res);
563  ASSERT_TRUE(status);
564 
565  // Construct the desired string. Default constructor doesn't like '\0' chars.
566  std::string checker("v1,v2,v3"); // Verify that the string is right size.
567  checker[2] = '\0'; // Use null delimiter instead of comma.
568  checker[5] = '\0';
569  assert(checker.size() == 8); // Verify it is still the correct size
570 
571  // Check that the rocksdb result string matches the desired string
572  assert(res.size() == checker.size());
573  ASSERT_EQ(res, checker);
574 }

Here is the call graph for this function:

rocksdb::TEST ( RedisListsTest  ,
RemoveTest   
)

Definition at line 595 of file redis_lists_test.cc.

References ASSERT_EQ, ASSERT_TRUE, rocksdb::RedisLists::Index(), rocksdb::RedisLists::InsertAfter(), rocksdb::RedisLists::InsertBefore(), rocksdb::RedisLists::Length(), rocksdb::RedisLists::PushLeft(), rocksdb::RedisLists::PushRight(), rocksdb::RedisLists::Remove(), and rocksdb::RedisLists::Trim().

595  {
596  RedisLists redis(kDefaultDbName, options, true); // Destructive
597 
598  string tempv; // Used below for all Index(), PopRight(), PopLeft()
599 
600  // A series of pushes and insertions
601  // Will result in [newbegin, z, a, aftera, x, newend, a, a]
602  redis.PushLeft("k1", "a");
603  redis.PushLeft("k1", "z");
604  redis.PushRight("k1", "x");
605  redis.InsertBefore("k1", "z", "newbegin"); // InsertBefore start of list
606  redis.InsertAfter("k1", "x", "newend"); // InsertAfter end of list
607  redis.InsertAfter("k1", "a", "aftera");
608  redis.PushRight("k1", "a");
609  redis.PushRight("k1", "a");
610 
611  // Verify
612  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
613  ASSERT_EQ(tempv, "newbegin");
614  ASSERT_TRUE(redis.Index("k1", -1, &tempv));
615  ASSERT_EQ(tempv, "a");
616 
617  // Check RemoveFirst (Remove the first two 'a')
618  // Results in [newbegin, z, aftera, x, newend, a]
619  int numRemoved = redis.Remove("k1", 2, "a");
620  ASSERT_EQ(numRemoved, 2);
621  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
622  ASSERT_EQ(tempv, "newbegin");
623  ASSERT_TRUE(redis.Index("k1", 1, &tempv));
624  ASSERT_EQ(tempv, "z");
625  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
626  ASSERT_EQ(tempv, "newend");
627  ASSERT_TRUE(redis.Index("k1", 5, &tempv));
628  ASSERT_EQ(tempv, "a");
629  ASSERT_EQ(redis.Length("k1"), 6);
630 
631  // Repopulate some stuff
632  // Results in: [x, x, x, x, x, newbegin, z, x, aftera, x, newend, a, x]
633  redis.PushLeft("k1", "x");
634  redis.PushLeft("k1", "x");
635  redis.PushLeft("k1", "x");
636  redis.PushLeft("k1", "x");
637  redis.PushLeft("k1", "x");
638  redis.PushRight("k1", "x");
639  redis.InsertAfter("k1", "z", "x");
640 
641  // Test removal from end
642  numRemoved = redis.Remove("k1", -2, "x");
643  ASSERT_EQ(numRemoved, 2);
644  ASSERT_TRUE(redis.Index("k1", 8, &tempv));
645  ASSERT_EQ(tempv, "aftera");
646  ASSERT_TRUE(redis.Index("k1", 9, &tempv));
647  ASSERT_EQ(tempv, "newend");
648  ASSERT_TRUE(redis.Index("k1", 10, &tempv));
649  ASSERT_EQ(tempv, "a");
650  ASSERT_TRUE(!redis.Index("k1", 11, &tempv));
651  numRemoved = redis.Remove("k1", -2, "x");
652  ASSERT_EQ(numRemoved, 2);
653  ASSERT_TRUE(redis.Index("k1", 4, &tempv));
654  ASSERT_EQ(tempv, "newbegin");
655  ASSERT_TRUE(redis.Index("k1", 6, &tempv));
656  ASSERT_EQ(tempv, "aftera");
657 
658  // We now have: [x, x, x, x, newbegin, z, aftera, newend, a]
659  ASSERT_EQ(redis.Length("k1"), 9);
660  ASSERT_TRUE(redis.Index("k1", -1, &tempv));
661  ASSERT_EQ(tempv, "a");
662  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
663  ASSERT_EQ(tempv, "x");
664 
665  // Test over-shooting (removing more than there exists)
666  numRemoved = redis.Remove("k1", -9000, "x");
667  ASSERT_EQ(numRemoved , 4); // Only really removed 4
668  ASSERT_EQ(redis.Length("k1"), 5);
669  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
670  ASSERT_EQ(tempv, "newbegin");
671  numRemoved = redis.Remove("k1", 1, "x");
672  ASSERT_EQ(numRemoved, 0);
673 
674  // Try removing ALL!
675  numRemoved = redis.Remove("k1", 0, "newbegin"); // REMOVE 0 will remove all!
676  ASSERT_EQ(numRemoved, 1);
677 
678  // Removal from an empty-list
679  ASSERT_TRUE(redis.Trim("k1", 1, 0));
680  numRemoved = redis.Remove("k1", 1, "z");
681  ASSERT_EQ(numRemoved, 0);
682 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
Empty   
)

Definition at line 684 of file db_test.cc.

References ASSERT_EQ, ASSERT_TRUE, and db_.

684  {
685  do {
686  ASSERT_TRUE(db_ != nullptr);
687  ASSERT_EQ("NOT_FOUND", Get("foo"));
688  } while (ChangeOptions());
689 }
rocksdb::TEST ( RedisListsTest  ,
PersistenceMultiKeyTest   
)

Definition at line 686 of file redis_lists_test.cc.

References ASSERT_EQ, ASSERT_TRUE, rocksdb::RedisLists::Index(), rocksdb::RedisLists::InsertAfter(), rocksdb::RedisLists::InsertBefore(), rocksdb::RedisLists::Length(), rocksdb::RedisLists::PopLeft(), rocksdb::RedisLists::PushLeft(), and rocksdb::RedisLists::PushRight().

686  {
687 
688  string tempv; // Used below for all Index(), PopRight(), PopLeft()
689 
690  // Block one: populate a single key in the database
691  {
692  RedisLists redis(kDefaultDbName, options, true); // Destructive
693 
694  // A series of pushes and insertions
695  // Will result in [newbegin, z, a, aftera, x, newend, a, a]
696  redis.PushLeft("k1", "a");
697  redis.PushLeft("k1", "z");
698  redis.PushRight("k1", "x");
699  redis.InsertBefore("k1", "z", "newbegin"); // InsertBefore start of list
700  redis.InsertAfter("k1", "x", "newend"); // InsertAfter end of list
701  redis.InsertAfter("k1", "a", "aftera");
702  redis.PushRight("k1", "a");
703  redis.PushRight("k1", "a");
704 
705  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
706  ASSERT_EQ(tempv, "aftera");
707  }
708 
709  // Block two: make sure changes were saved and add some other key
710  {
711  RedisLists redis(kDefaultDbName, options, false); // Persistent, non-destructive
712 
713  // Check
714  ASSERT_EQ(redis.Length("k1"), 8);
715  ASSERT_TRUE(redis.Index("k1", 3, &tempv));
716  ASSERT_EQ(tempv, "aftera");
717 
718  redis.PushRight("k2", "randomkey");
719  redis.PushLeft("k2", "sas");
720 
721  redis.PopLeft("k1", &tempv);
722  }
723 
724  // Block three: Verify the changes from block 2
725  {
726  RedisLists redis(kDefaultDbName, options, false); // Persistent, non-destructive
727 
728  // Check
729  ASSERT_EQ(redis.Length("k1"), 7);
730  ASSERT_EQ(redis.Length("k2"), 2);
731  ASSERT_TRUE(redis.Index("k1", 0, &tempv));
732  ASSERT_EQ(tempv, "z");
733  ASSERT_TRUE(redis.Index("k2", -2, &tempv));
734  ASSERT_EQ(tempv, "sas");
735  }
736 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
ReadWrite   
)

Definition at line 691 of file db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

691  {
692  do {
693  ASSERT_OK(Put("foo", "v1"));
694  ASSERT_EQ("v1", Get("foo"));
695  ASSERT_OK(Put("bar", "v2"));
696  ASSERT_OK(Put("foo", "v3"));
697  ASSERT_EQ("v3", Get("foo"));
698  ASSERT_EQ("v2", Get("bar"));
699  } while (ChangeOptions());
700 }
rocksdb::TEST ( SimpleTableDBTest  ,
Empty   
)

Definition at line 698 of file simple_table_db_test.cc.

References ASSERT_EQ, ASSERT_TRUE, and db_.

698  {
699  ASSERT_TRUE(db_ != nullptr);
700  ASSERT_EQ("NOT_FOUND", Get("0000000000000foo"));
701 }
rocksdb::TEST ( SimpleTableDBTest  ,
ReadWrite   
)

Definition at line 703 of file simple_table_db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

703  {
704  ASSERT_OK(Put("0000000000000foo", "v1"));
705  ASSERT_EQ("v1", Get("0000000000000foo"));
706  ASSERT_OK(Put("0000000000000bar", "v2"));
707  ASSERT_OK(Put("0000000000000foo", "v3"));
708  ASSERT_EQ("v3", Get("0000000000000foo"));
709  ASSERT_EQ("v2", Get("0000000000000bar"));
710 }
rocksdb::TEST ( DBTest  ,
IndexAndFilterBlocksOfNewTableAddedToCache   
)

Definition at line 704 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, BLOCK_CACHE_ADD, BLOCK_CACHE_DATA_MISS, BLOCK_CACHE_FILTER_HIT, BLOCK_CACHE_FILTER_MISS, BLOCK_CACHE_INDEX_MISS, rocksdb::Options::create_if_missing, CreateDBStatistics(), db_, rocksdb::Options::filter_policy, rocksdb::DBImpl::KeyMayExist(), NewBloomFilterPolicy(), rocksdb::DBImpl::Put(), rocksdb::Options::statistics, and value.

704  {
705  Options options = CurrentOptions();
706  std::unique_ptr<const FilterPolicy> filter_policy(NewBloomFilterPolicy(20));
707  options.filter_policy = filter_policy.get();
708  options.create_if_missing = true;
709  options.statistics = rocksdb::CreateDBStatistics();
710  DestroyAndReopen(&options);
711 
712  ASSERT_OK(db_->Put(WriteOptions(), "key", "val"));
713  // Create a new table.
714  dbfull()->Flush(FlushOptions());
715 
716  // index/filter blocks added to block cache right after table creation.
717  ASSERT_EQ(1,
718  options.statistics.get()->getTickerCount(BLOCK_CACHE_INDEX_MISS));
719  ASSERT_EQ(1,
720  options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
721  ASSERT_EQ(2, /* only index/filter were added */
722  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
723  ASSERT_EQ(0,
724  options.statistics.get()->getTickerCount(BLOCK_CACHE_DATA_MISS));
725 
726  // Make sure filter block is in cache.
727  std::string value;
728  ReadOptions ropt;
729  db_->KeyMayExist(ReadOptions(), "key", &value);
730 
731  // Miss count should remain the same.
732  ASSERT_EQ(1,
733  options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
734  ASSERT_EQ(1,
735  options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT));
736 
737  db_->KeyMayExist(ReadOptions(), "key", &value);
738  ASSERT_EQ(1,
739  options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
740  ASSERT_EQ(2,
741  options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT));
742 
743  // Make sure index block is in cache.
744  auto index_block_hit =
745  options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT);
746  value = Get("key");
747  ASSERT_EQ(1,
748  options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
749  ASSERT_EQ(index_block_hit + 1,
750  options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT));
751 
752  value = Get("key");
753  ASSERT_EQ(1,
754  options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_MISS));
755  ASSERT_EQ(index_block_hit + 2,
756  options.statistics.get()->getTickerCount(BLOCK_CACHE_FILTER_HIT));
757 }

Here is the call graph for this function:

rocksdb::TEST ( SimpleTableDBTest  ,
Flush   
)

Definition at line 712 of file simple_table_db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

712  {
713  ASSERT_OK(Put("0000000000000foo", "v1"));
714  ASSERT_OK(Put("0000000000000bar", "v2"));
715  ASSERT_OK(Put("0000000000000foo", "v3"));
716  dbfull()->TEST_FlushMemTable();
717  ASSERT_EQ("v3", Get("0000000000000foo"));
718  ASSERT_EQ("v2", Get("0000000000000bar"));
719 }
rocksdb::TEST ( SimpleTableDBTest  ,
Flush2   
)

Definition at line 721 of file simple_table_db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

721  {
722  ASSERT_OK(Put("0000000000000bar", "b"));
723  ASSERT_OK(Put("0000000000000foo", "v1"));
724  dbfull()->TEST_FlushMemTable();
725 
726  ASSERT_OK(Put("0000000000000foo", "v2"));
727  dbfull()->TEST_FlushMemTable();
728  ASSERT_EQ("v2", Get("0000000000000foo"));
729 
730  ASSERT_OK(Put("0000000000000eee", "v3"));
731  dbfull()->TEST_FlushMemTable();
732  ASSERT_EQ("v3", Get("0000000000000eee"));
733 
734  ASSERT_OK(Delete("0000000000000bar"));
735  dbfull()->TEST_FlushMemTable();
736  ASSERT_EQ("NOT_FOUND", Get("0000000000000bar"));
737 
738  ASSERT_OK(Put("0000000000000eee", "v5"));
739  dbfull()->TEST_FlushMemTable();
740  ASSERT_EQ("v5", Get("0000000000000eee"));
741 }
rocksdb::TEST ( Harness  ,
SimpleEmptyKey   
)

Definition at line 747 of file table_test.cc.

References GenerateArgList(), and rocksdb::test::RandomSeed().

747  {
748  std::vector<TestArgs> args = GenerateArgList();
749  for (unsigned int i = 0; i < args.size(); i++) {
750  Init(args[i]);
751  Random rnd(test::RandomSeed() + 1);
752  Add("", "v");
753  Test(&rnd);
754  }
755 }

Here is the call graph for this function:

rocksdb::TEST ( SimpleTableDBTest  ,
CompactionTrigger   
)

Definition at line 755 of file simple_table_db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Options::level0_file_num_compaction_trigger, rocksdb::Options::max_mem_compaction_level, rocksdb::Options::num_levels, RandomString(), and rocksdb::Options::write_buffer_size.

755  {
756  Options options = CurrentOptions();
757  options.write_buffer_size = 100 << 10; //100KB
758  options.num_levels = 3;
759  options.max_mem_compaction_level = 0;
760  options.level0_file_num_compaction_trigger = 3;
761  Reopen(&options);
762 
763  Random rnd(301);
764 
765  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
766  num++) {
767  std::vector<std::string> values;
768  // Write 120KB (12 values, each 10K)
769  for (int i = 0; i < 12; i++) {
770  values.push_back(RandomString(&rnd, 10000));
771  ASSERT_OK(Put(Key(i), values[i]));
772  }
773  dbfull()->TEST_WaitForFlushMemTable();
774  ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
775  }
776 
777  //generate one more file in level-0, and should trigger level-0 compaction
778  std::vector<std::string> values;
779  for (int i = 0; i < 12; i++) {
780  values.push_back(RandomString(&rnd, 10000));
781  ASSERT_OK(Put(Key(i), values[i]));
782  }
783  dbfull()->TEST_WaitForCompact();
784 
785  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
786  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
787 }

Here is the call graph for this function:

rocksdb::TEST ( Harness  ,
SimpleSingle   
)

Definition at line 757 of file table_test.cc.

References GenerateArgList(), and rocksdb::test::RandomSeed().

757  {
758  std::vector<TestArgs> args = GenerateArgList();
759  for (unsigned int i = 0; i < args.size(); i++) {
760  Init(args[i]);
761  Random rnd(test::RandomSeed() + 2);
762  Add("abc", "v");
763  Test(&rnd);
764  }
765 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
LevelLimitReopen   
)

Definition at line 759 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Status::IsCorruption(), rocksdb::Options::max_bytes_for_level_multiplier_additional, rocksdb::Options::num_levels, rocksdb::Status::ToString(), and value.

759  {
760  Options options = CurrentOptions();
761  Reopen(&options);
762 
763  const std::string value(1024 * 1024, ' ');
764  int i = 0;
765  while (NumTableFilesAtLevel(2) == 0) {
766  ASSERT_OK(Put(Key(i++), value));
767  }
768 
769  options.num_levels = 1;
770  options.max_bytes_for_level_multiplier_additional.resize(1, 1);
771  Status s = TryReopen(&options);
772  ASSERT_EQ(s.IsCorruption(), true);
773  ASSERT_EQ(s.ToString(),
774  "Corruption: VersionEdit: db already has "
775  "more levels than options.num_levels");
776 
777  options.num_levels = 10;
778  options.max_bytes_for_level_multiplier_additional.resize(10, 1);
779  ASSERT_OK(TryReopen(&options));
780 }

Here is the call graph for this function:

rocksdb::TEST ( Harness  ,
SimpleMulti   
)

Definition at line 767 of file table_test.cc.

References GenerateArgList(), and rocksdb::test::RandomSeed().

767  {
768  std::vector<TestArgs> args = GenerateArgList();
769  for (unsigned int i = 0; i < args.size(); i++) {
770  Init(args[i]);
771  Random rnd(test::RandomSeed() + 3);
772  Add("abc", "v");
773  Add("abcd", "v");
774  Add("ac", "v2");
775  Test(&rnd);
776  }
777 }

Here is the call graph for this function:

rocksdb::TEST ( Harness  ,
SimpleSpecialKey   
)

Definition at line 779 of file table_test.cc.

References GenerateArgList(), and rocksdb::test::RandomSeed().

779  {
780  std::vector<TestArgs> args = GenerateArgList();
781  for (unsigned int i = 0; i < args.size(); i++) {
782  Init(args[i]);
783  Random rnd(test::RandomSeed() + 4);
784  Add("\xff\xff", "v3");
785  Test(&rnd);
786  }
787 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
Preallocation   
)

Definition at line 782 of file db_test.cc.

References rocksdb::WritableFile::Append(), ASSERT_EQ, ASSERT_OK, dbname_, env_, rocksdb::WritableFile::GetPreallocationStatus(), rocksdb::Env::NewWritableFile(), and rocksdb::WritableFile::SetPreallocationBlockSize().

782  {
783  const std::string src = dbname_ + "/alloc_test";
784  unique_ptr<WritableFile> srcfile;
785  const EnvOptions soptions;
786  ASSERT_OK(env_->NewWritableFile(src, &srcfile, soptions));
787  srcfile->SetPreallocationBlockSize(1024 * 1024);
788 
789  // No writes should mean no preallocation
790  size_t block_size, last_allocated_block;
791  srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
792  ASSERT_EQ(last_allocated_block, 0UL);
793 
794  // Small write should preallocate one block
795  srcfile->Append("test");
796  srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
797  ASSERT_EQ(last_allocated_block, 1UL);
798 
799  // Write an entire preallocation block, make sure we increased by two.
800  std::string buf(block_size, ' ');
801  srcfile->Append(buf);
802  srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
803  ASSERT_EQ(last_allocated_block, 2UL);
804 
805  // Write five more blocks at once, ensure we're where we need to be.
806  buf = std::string(block_size * 5, ' ');
807  srcfile->Append(buf);
808  srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
809  ASSERT_EQ(last_allocated_block, 7UL);
810 }

Here is the call graph for this function:

rocksdb::TEST ( TableTest  ,
BasicTableProperties   
)

Definition at line 804 of file table_test.cc.

References rocksdb::BlockBuilder::Add(), rocksdb::Constructor::Add(), ASSERT_EQ, rocksdb::Options::block_restart_interval, BytewiseComparator(), rocksdb::Options::compression, rocksdb::BlockBuilder::Finish(), rocksdb::Constructor::Finish(), rocksdb::TableReader::GetTableProperties(), kBlockTrailerSize, kNoCompression, rocksdb::Slice::size(), and rocksdb::BlockBasedTableConstructor::table_reader().

804  {
805  BlockBasedTableConstructor c(BytewiseComparator());
806 
807  c.Add("a1", "val1");
808  c.Add("b2", "val2");
809  c.Add("c3", "val3");
810  c.Add("d4", "val4");
811  c.Add("e5", "val5");
812  c.Add("f6", "val6");
813  c.Add("g7", "val7");
814  c.Add("h8", "val8");
815  c.Add("j9", "val9");
816 
817  std::vector<std::string> keys;
818  KVMap kvmap;
819  Options options;
820  options.compression = kNoCompression;
821  options.block_restart_interval = 1;
822 
823  c.Finish(options, &keys, &kvmap);
824 
825  auto& props = c.table_reader()->GetTableProperties();
826  ASSERT_EQ(kvmap.size(), props.num_entries);
827 
828  auto raw_key_size = kvmap.size() * 2ul;
829  auto raw_value_size = kvmap.size() * 4ul;
830 
831  ASSERT_EQ(raw_key_size, props.raw_key_size);
832  ASSERT_EQ(raw_value_size, props.raw_value_size);
833  ASSERT_EQ(1ul, props.num_data_blocks);
834  ASSERT_EQ("", props.filter_policy_name); // no filter policy is used
835 
836  // Verify data size.
837  BlockBuilder block_builder(options);
838  for (const auto& item : kvmap) {
839  block_builder.Add(item.first, item.second);
840  }
841  Slice content = block_builder.Finish();
842  ASSERT_EQ(
843  content.size() + kBlockTrailerSize,
844  props.data_size
845  );
846 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
PutDeleteGet   
)

Definition at line 812 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, db_, rocksdb::DBImpl::Delete(), and rocksdb::DBImpl::Put().

812  {
813  do {
814  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
815  ASSERT_EQ("v1", Get("foo"));
816  ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
817  ASSERT_EQ("v2", Get("foo"));
818  ASSERT_OK(db_->Delete(WriteOptions(), "foo"));
819  ASSERT_EQ("NOT_FOUND", Get("foo"));
820  } while (ChangeOptions());
821 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
GetFromImmutableLayer   
)

Definition at line 824 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Options::env, env_, and rocksdb::Options::write_buffer_size.

824  {
825  do {
826  Options options = CurrentOptions();
827  options.env = env_;
828  options.write_buffer_size = 100000; // Small write buffer
829  Reopen(&options);
830 
831  ASSERT_OK(Put("foo", "v1"));
832  ASSERT_EQ("v1", Get("foo"));
833 
834  env_->delay_sstable_sync_.Release_Store(env_); // Block sync calls
835  Put("k1", std::string(100000, 'x')); // Fill memtable
836  Put("k2", std::string(100000, 'y')); // Trigger compaction
837  ASSERT_EQ("v1", Get("foo"));
838  env_->delay_sstable_sync_.Release_Store(nullptr); // Release sync calls
839  } while (ChangeOptions());
840 }
rocksdb::TEST ( DBTest  ,
GetFromVersions   
)

Definition at line 842 of file db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

842  {
843  do {
844  ASSERT_OK(Put("foo", "v1"));
845  dbfull()->TEST_FlushMemTable();
846  ASSERT_EQ("v1", Get("foo"));
847  } while (ChangeOptions());
848 }
rocksdb::TEST ( TableTest  ,
FilterPolicyNameProperties   
)

Definition at line 848 of file table_test.cc.

References rocksdb::Constructor::Add(), ASSERT_EQ, BytewiseComparator(), rocksdb::Options::filter_policy, rocksdb::Constructor::Finish(), rocksdb::TableReader::GetTableProperties(), NewBloomFilterPolicy(), and rocksdb::BlockBasedTableConstructor::table_reader().

848  {
849  BlockBasedTableConstructor c(BytewiseComparator());
850  c.Add("a1", "val1");
851  std::vector<std::string> keys;
852  KVMap kvmap;
853  Options options;
854  std::unique_ptr<const FilterPolicy> filter_policy(
856  );
857  options.filter_policy = filter_policy.get();
858 
859  c.Finish(options, &keys, &kvmap);
860  auto& props = c.table_reader()->GetTableProperties();
861  ASSERT_EQ("rocksdb.BuiltinBloomFilter", props.filter_policy_name);
862 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
GetSnapshot   
)

Definition at line 850 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, db_, rocksdb::DBImpl::GetSnapshot(), and rocksdb::DBImpl::ReleaseSnapshot().

850  {
851  do {
852  // Try with both a short key and a long key
853  for (int i = 0; i < 2; i++) {
854  std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
855  ASSERT_OK(Put(key, "v1"));
856  const Snapshot* s1 = db_->GetSnapshot();
857  ASSERT_OK(Put(key, "v2"));
858  ASSERT_EQ("v2", Get(key));
859  ASSERT_EQ("v1", Get(key, s1));
860  dbfull()->TEST_FlushMemTable();
861  ASSERT_EQ("v2", Get(key));
862  ASSERT_EQ("v1", Get(key, s1));
863  db_->ReleaseSnapshot(s1);
864  }
865  } while (ChangeOptions());
866 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
GetLevel0Ordering   
)

Definition at line 868 of file db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

868  {
869  do {
870  // Check that we process level-0 files in correct order. The code
871  // below generates two level-0 files where the earlier one comes
872  // before the later one in the level-0 file list since the earlier
873  // one has a smaller "smallest" key.
874  ASSERT_OK(Put("bar", "b"));
875  ASSERT_OK(Put("foo", "v1"));
876  dbfull()->TEST_FlushMemTable();
877  ASSERT_OK(Put("foo", "v2"));
878  dbfull()->TEST_FlushMemTable();
879  ASSERT_EQ("v2", Get("foo"));
880  } while (ChangeOptions());
881 }
rocksdb::TEST ( TableTest  ,
IndexSizeStat   
)

Definition at line 873 of file table_test.cc.

References rocksdb::Constructor::Add(), ASSERT_GT, rocksdb::Options::block_restart_interval, BytewiseComparator(), rocksdb::Options::compression, rocksdb::Constructor::Finish(), rocksdb::TableReader::GetTableProperties(), rocksdb::TableProperties::index_size, kNoCompression, rocksdb::test::RandomSeed(), RandomString(), and rocksdb::BlockBasedTableConstructor::table_reader().

873  {
874  uint64_t last_index_size = 0;
875 
876  // we need to use random keys since the pure human readable texts
877  // may be well compressed, resulting in an insignificant change of index
878  // block size.
879  Random rnd(test::RandomSeed());
880  std::vector<std::string> keys;
881 
882  for (int i = 0; i < 100; ++i) {
883  keys.push_back(RandomString(&rnd, 10000));
884  }
885 
886  // Each time we load one more key to the table. the table index block
887  // size is expected to be larger than last time's.
888  for (size_t i = 1; i < keys.size(); ++i) {
889  BlockBasedTableConstructor c(BytewiseComparator());
890  for (size_t j = 0; j < i; ++j) {
891  c.Add(keys[j], "val");
892  }
893 
894  std::vector<std::string> ks;
895  KVMap kvmap;
896  Options options;
897  options.compression = kNoCompression;
898  options.block_restart_interval = 1;
899 
900  c.Finish(options, &ks, &kvmap);
901  auto index_size =
902  c.table_reader()->GetTableProperties().index_size;
903  ASSERT_GT(index_size, last_index_size);
904  last_index_size = index_size;
905  }
906 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
GetOrderedByLevels   
)

Definition at line 883 of file db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

883  {
884  do {
885  ASSERT_OK(Put("foo", "v1"));
886  Compact("a", "z");
887  ASSERT_EQ("v1", Get("foo"));
888  ASSERT_OK(Put("foo", "v2"));
889  ASSERT_EQ("v2", Get("foo"));
890  dbfull()->TEST_FlushMemTable();
891  ASSERT_EQ("v2", Get("foo"));
892  } while (ChangeOptions());
893 }
rocksdb::TEST ( DBTest  ,
GetPicksCorrectFile   
)

Definition at line 895 of file db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

895  {
896  do {
897  // Arrange to have multiple files in a non-level-0 level.
898  ASSERT_OK(Put("a", "va"));
899  Compact("a", "b");
900  ASSERT_OK(Put("x", "vx"));
901  Compact("x", "y");
902  ASSERT_OK(Put("f", "vf"));
903  Compact("f", "g");
904  ASSERT_EQ("va", Get("a"));
905  ASSERT_EQ("vf", Get("f"));
906  ASSERT_EQ("vx", Get("x"));
907  } while (ChangeOptions());
908 }
rocksdb::TEST ( TableTest  ,
NumBlockStat   
)

Definition at line 908 of file table_test.cc.

References rocksdb::Constructor::Add(), ASSERT_EQ, rocksdb::Options::block_restart_interval, rocksdb::Options::block_size, BytewiseComparator(), rocksdb::Options::compression, rocksdb::Constructor::Finish(), rocksdb::TableReader::GetTableProperties(), kNoCompression, rocksdb::TableProperties::num_data_blocks, rocksdb::test::RandomSeed(), RandomString(), and rocksdb::BlockBasedTableConstructor::table_reader().

908  {
909  Random rnd(test::RandomSeed());
910  BlockBasedTableConstructor c(BytewiseComparator());
911  Options options;
912  options.compression = kNoCompression;
913  options.block_restart_interval = 1;
914  options.block_size = 1000;
915 
916  for (int i = 0; i < 10; ++i) {
917  // the key/val are slightly smaller than block size, so that each block
918  // holds roughly one key/value pair.
919  c.Add(RandomString(&rnd, 900), "val");
920  }
921 
922  std::vector<std::string> ks;
923  KVMap kvmap;
924  c.Finish(options, &ks, &kvmap);
925  ASSERT_EQ(
926  kvmap.size(),
927  c.table_reader()->GetTableProperties().num_data_blocks
928  );
929 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
GetEncountersEmptyLevel   
)

Definition at line 910 of file db_test.cc.

References ASSERT_EQ, ASSERT_LE, env_, and rocksdb::Env::SleepForMicroseconds().

910  {
911  do {
912  // Arrange for the following to happen:
913  // * sstable A in level 0
914  // * nothing in level 1
915  // * sstable B in level 2
916  // Then do enough Get() calls to arrange for an automatic compaction
917  // of sstable A. A bug would cause the compaction to be marked as
918  // occurring at level 1 (instead of the correct level 0).
919 
920  // Step 1: First place sstables in levels 0 and 2
921  int compaction_count = 0;
922  while (NumTableFilesAtLevel(0) == 0 ||
923  NumTableFilesAtLevel(2) == 0) {
924  ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
925  compaction_count++;
926  Put("a", "begin");
927  Put("z", "end");
928  dbfull()->TEST_FlushMemTable();
929  }
930 
931  // Step 2: clear level 1 if necessary.
932  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
933  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
934  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
935  ASSERT_EQ(NumTableFilesAtLevel(2), 1);
936 
937  // Step 3: read a bunch of times
938  for (int i = 0; i < 1000; i++) {
939  ASSERT_EQ("NOT_FOUND", Get("missing"));
940  }
941 
942  // Step 4: Wait for compaction to finish
943  env_->SleepForMicroseconds(1000000);
944 
945  ASSERT_EQ(NumTableFilesAtLevel(0), 1); // XXX
946  } while (ChangeOptions(kSkipUniversalCompaction));
947 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
KeyMayExist   
)

Definition at line 952 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, BLOCK_CACHE_ADD, CreateDBStatistics(), db_, rocksdb::DBImpl::Delete(), rocksdb::Options::filter_policy, rocksdb::DBImpl::KeyMayExist(), NewBloomFilterPolicy(), NO_FILE_OPENS, rocksdb::DBImpl::Put(), rocksdb::Options::statistics, value, and value_found.

952  {
953  do {
954  ReadOptions ropts;
955  std::string value;
956  Options options = CurrentOptions();
957  options.filter_policy = NewBloomFilterPolicy(20);
958  options.statistics = rocksdb::CreateDBStatistics();
959  Reopen(&options);
960 
961  ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value));
962 
963  ASSERT_OK(db_->Put(WriteOptions(), "a", "b"));
964  bool value_found = false;
965  ASSERT_TRUE(db_->KeyMayExist(ropts, "a", &value, &value_found));
966  ASSERT_TRUE(value_found);
967  ASSERT_EQ("b", value);
968 
969  dbfull()->Flush(FlushOptions());
970  value.clear();
971 
972  long numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
973  long cache_added =
974  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
975  ASSERT_TRUE(db_->KeyMayExist(ropts, "a", &value, &value_found));
976  ASSERT_TRUE(!value_found);
977  // assert that no new files were opened and no new blocks were
978  // read into block cache.
979  ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
980  ASSERT_EQ(cache_added,
981  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
982 
983  ASSERT_OK(db_->Delete(WriteOptions(), "a"));
984 
985  numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
986  cache_added =
987  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
988  ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value));
989  ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
990  ASSERT_EQ(cache_added,
991  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
992 
993  dbfull()->Flush(FlushOptions());
994  dbfull()->CompactRange(nullptr, nullptr);
995 
996  numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
997  cache_added =
998  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
999  ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value));
1000  ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
1001  ASSERT_EQ(cache_added,
1002  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
1003 
1004  ASSERT_OK(db_->Delete(WriteOptions(), "c"));
1005 
1006  numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
1007  cache_added =
1008  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
1009  ASSERT_TRUE(!db_->KeyMayExist(ropts, "c", &value));
1010  ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
1011  ASSERT_EQ(cache_added,
1012  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
1013 
1014  delete options.filter_policy;
1015  } while (ChangeOptions());
1016 }

Here is the call graph for this function:

rocksdb::TEST ( TableTest  ,
BlockCacheTest   
)

Definition at line 977 of file table_test.cc.

References rocksdb::Constructor::Add(), ASSERT_EQ, rocksdb::BlockCacheProperties::AssertEqual(), rocksdb::Options::block_cache, BytewiseComparator(), rocksdb::Options::create_if_missing, CreateDBStatistics(), rocksdb::Constructor::Finish(), rocksdb::Iterator::key(), rocksdb::BlockBasedTableConstructor::NewIterator(), NewLRUCache(), rocksdb::BlockBasedTableConstructor::Reopen(), rocksdb::Iterator::SeekToFirst(), rocksdb::Options::statistics, and rocksdb::Slice::ToString().

977  {
978  // -- Table construction
979  Options options;
980  options.create_if_missing = true;
981  options.statistics = CreateDBStatistics();
982  options.block_cache = NewLRUCache(1024);
983  std::vector<std::string> keys;
984  KVMap kvmap;
985 
986  BlockBasedTableConstructor c(BytewiseComparator());
987  c.Add("key", "value");
988  c.Finish(options, &keys, &kvmap);
989 
990  // -- PART 1: Open with regular block cache.
991  // Since block_cache is disabled, no cache activities will be involved.
992  unique_ptr<Iterator> iter;
993 
994  // At first, no block will be accessed.
995  {
996  BlockCacheProperties props(options.statistics);
997  // index will be added to block cache.
998  props.AssertEqual(
999  1, // index block miss
1000  0,
1001  0,
1002  0
1003  );
1004  }
1005 
1006  // Only index block will be accessed
1007  {
1008  iter.reset(c.NewIterator());
1009  BlockCacheProperties props(options.statistics);
1010  // NOTE: to help better highlight the "delta" of each ticker, I use
1011  // <last_value> + <added_value> to indicate the increment of changed
1012  // value; other numbers remain the same.
1013  props.AssertEqual(
1014  1,
1015  0 + 1, // index block hit
1016  0,
1017  0
1018  );
1019  }
1020 
1021  // Only data block will be accessed
1022  {
1023  iter->SeekToFirst();
1024  BlockCacheProperties props(options.statistics);
1025  props.AssertEqual(
1026  1,
1027  1,
1028  0 + 1, // data block miss
1029  0
1030  );
1031  }
1032 
1033  // Data block will be in cache
1034  {
1035  iter.reset(c.NewIterator());
1036  iter->SeekToFirst();
1037  BlockCacheProperties props(options.statistics);
1038  props.AssertEqual(
1039  1,
1040  1 + 1, // index block hit
1041  1,
1042  0 + 1 // data block hit
1043  );
1044  }
1045  // release the iterator so that the block cache can reset correctly.
1046  iter.reset();
1047 
1048  // -- PART 2: Open without block cache
1049  options.block_cache.reset();
1050  options.statistics = CreateDBStatistics(); // reset the props
1051  c.Reopen(options);
1052 
1053  {
1054  iter.reset(c.NewIterator());
1055  iter->SeekToFirst();
1056  ASSERT_EQ("key", iter->key().ToString());
1057  BlockCacheProperties props(options.statistics);
1058  // Nothing is affected at all
1059  props.AssertEqual(0, 0, 0, 0);
1060  }
1061 
1062  // -- PART 3: Open with very small block cache
1063  // In this test, no block will ever get hit since the block cache is
1064  // too small to fit even one entry.
1065  options.block_cache = NewLRUCache(1);
1066  c.Reopen(options);
1067  {
1068  BlockCacheProperties props(options.statistics);
1069  props.AssertEqual(
1070  1, // index block miss
1071  0,
1072  0,
1073  0
1074  );
1075  }
1076 
1077 
1078  {
1079  // Both index and data block get accessed.
1080  // It first cache index block then data block. But since the cache size
1081  // is only 1, index block will be purged after data block is inserted.
1082  iter.reset(c.NewIterator());
1083  BlockCacheProperties props(options.statistics);
1084  props.AssertEqual(
1085  1 + 1, // index block miss
1086  0,
1087  0, // data block miss
1088  0
1089  );
1090  }
1091 
1092  {
1093  // SeekToFirst() accesses data block. For a similar reason, we expect data
1094  // block's cache miss.
1095  iter->SeekToFirst();
1096  BlockCacheProperties props(options.statistics);
1097  props.AssertEqual(
1098  2,
1099  0,
1100  0 + 1, // data block miss
1101  0
1102  );
1103  }
1104 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
NonBlockingIteration   
)

Definition at line 1018 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, BLOCK_CACHE_ADD, CreateDBStatistics(), db_, kBlockCacheTier, rocksdb::DBImpl::NewIterator(), NO_FILE_OPENS, rocksdb::DBImpl::Put(), rocksdb::ReadOptions::read_tier, and rocksdb::Options::statistics.

1018  {
1019  do {
1020  ReadOptions non_blocking_opts, regular_opts;
1021  Options options = CurrentOptions();
1022  options.statistics = rocksdb::CreateDBStatistics();
1023  non_blocking_opts.read_tier = kBlockCacheTier;
1024  Reopen(&options);
1025  // write one kv to the database.
1026  ASSERT_OK(db_->Put(WriteOptions(), "a", "b"));
1027 
1028  // scan using non-blocking iterator. We should find it because
1029  // it is in memtable.
1030  Iterator* iter = db_->NewIterator(non_blocking_opts);
1031  int count = 0;
1032  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
1033  ASSERT_OK(iter->status());
1034  count++;
1035  }
1036  ASSERT_EQ(count, 1);
1037  delete iter;
1038 
1039  // flush memtable to storage. Now, the key should not be in the
1040  // memtable neither in the block cache.
1041  dbfull()->Flush(FlushOptions());
1042 
1043  // verify that a non-blocking iterator does not find any
1044  // kvs. Neither does it do any IOs to storage.
1045  long numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
1046  long cache_added =
1047  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
1048  iter = db_->NewIterator(non_blocking_opts);
1049  count = 0;
1050  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
1051  count++;
1052  }
1053  ASSERT_EQ(count, 0);
1054  ASSERT_TRUE(iter->status().IsIncomplete());
1055  ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
1056  ASSERT_EQ(cache_added,
1057  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
1058  delete iter;
1059 
1060  // read in the specified block via a regular get
1061  ASSERT_EQ(Get("a"), "b");
1062 
1063  // verify that we can find it via a non-blocking scan
1064  numopen = options.statistics.get()->getTickerCount(NO_FILE_OPENS);
1065  cache_added =
1066  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD);
1067  iter = db_->NewIterator(non_blocking_opts);
1068  count = 0;
1069  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
1070  ASSERT_OK(iter->status());
1071  count++;
1072  }
1073  ASSERT_EQ(count, 1);
1074  ASSERT_EQ(numopen, options.statistics.get()->getTickerCount(NO_FILE_OPENS));
1075  ASSERT_EQ(cache_added,
1076  options.statistics.get()->getTickerCount(BLOCK_CACHE_ADD));
1077  delete iter;
1078 
1079  } while (ChangeOptions());
1080 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
FilterDeletes   
)

Definition at line 1084 of file db_test.cc.

References ASSERT_EQ, rocksdb::WriteBatch::Clear(), rocksdb::WriteBatch::Delete(), rocksdb::Options::filter_deletes, rocksdb::Options::filter_policy, NewBloomFilterPolicy(), and rocksdb::WriteBatch::Put().

1084  {
1085  do {
1086  Options options = CurrentOptions();
1087  options.filter_policy = NewBloomFilterPolicy(20);
1088  options.filter_deletes = true;
1089  Reopen(&options);
1090  WriteBatch batch;
1091 
1092  batch.Delete("a");
1093  dbfull()->Write(WriteOptions(), &batch);
1094  ASSERT_EQ(AllEntriesFor("a"), "[ ]"); // Delete skipped
1095  batch.Clear();
1096 
1097  batch.Put("a", "b");
1098  batch.Delete("a");
1099  dbfull()->Write(WriteOptions(), &batch);
1100  ASSERT_EQ(Get("a"), "NOT_FOUND");
1101  ASSERT_EQ(AllEntriesFor("a"), "[ DEL, b ]"); // Delete issued
1102  batch.Clear();
1103 
1104  batch.Delete("c");
1105  batch.Put("c", "d");
1106  dbfull()->Write(WriteOptions(), &batch);
1107  ASSERT_EQ(Get("c"), "d");
1108  ASSERT_EQ(AllEntriesFor("c"), "[ d ]"); // Delete skipped
1109  batch.Clear();
1110 
1111  dbfull()->Flush(FlushOptions()); // A stray Flush
1112 
1113  batch.Delete("c");
1114  dbfull()->Write(WriteOptions(), &batch);
1115  ASSERT_EQ(AllEntriesFor("c"), "[ DEL, d ]"); // Delete issued
1116  batch.Clear();
1117 
1118  delete options.filter_policy;
1119  } while (ChangeCompactOptions());
1120 }

Here is the call graph for this function:

rocksdb::TEST ( TableTest  ,
ApproximateOffsetOfPlain   
)

Definition at line 1106 of file table_test.cc.

References rocksdb::Constructor::Add(), rocksdb::BlockBasedTableConstructor::ApproximateOffsetOf(), ASSERT_TRUE, Between(), rocksdb::Options::block_size, BytewiseComparator(), rocksdb::Options::compression, rocksdb::Constructor::Finish(), and kNoCompression.

1106  {
1107  BlockBasedTableConstructor c(BytewiseComparator());
1108  c.Add("k01", "hello");
1109  c.Add("k02", "hello2");
1110  c.Add("k03", std::string(10000, 'x'));
1111  c.Add("k04", std::string(200000, 'x'));
1112  c.Add("k05", std::string(300000, 'x'));
1113  c.Add("k06", "hello3");
1114  c.Add("k07", std::string(100000, 'x'));
1115  std::vector<std::string> keys;
1116  KVMap kvmap;
1117  Options options;
1118  options.block_size = 1024;
1119  options.compression = kNoCompression;
1120  c.Finish(options, &keys, &kvmap);
1121 
1122  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
1123  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
1124  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
1125  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
1126  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
1127  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
1128  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
1129  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
1130  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
1131  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
1132  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
1133 
1134 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
IterEmpty   
)

Definition at line 1122 of file db_test.cc.

References ASSERT_EQ, db_, rocksdb::DBImpl::NewIterator(), rocksdb::Iterator::Seek(), rocksdb::Iterator::SeekToFirst(), and rocksdb::Iterator::SeekToLast().

1122  {
1123  do {
1124  Iterator* iter = db_->NewIterator(ReadOptions());
1125 
1126  iter->SeekToFirst();
1127  ASSERT_EQ(IterStatus(iter), "(invalid)");
1128 
1129  iter->SeekToLast();
1130  ASSERT_EQ(IterStatus(iter), "(invalid)");
1131 
1132  iter->Seek("foo");
1133  ASSERT_EQ(IterStatus(iter), "(invalid)");
1134 
1135  delete iter;
1136  } while (ChangeCompactOptions());
1137 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
IterSingle   
)

Definition at line 1139 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, db_, rocksdb::DBImpl::NewIterator(), rocksdb::Iterator::Next(), rocksdb::Iterator::Prev(), rocksdb::Iterator::Seek(), rocksdb::Iterator::SeekToFirst(), and rocksdb::Iterator::SeekToLast().

1139  {
1140  do {
1141  ASSERT_OK(Put("a", "va"));
1142  Iterator* iter = db_->NewIterator(ReadOptions());
1143 
1144  iter->SeekToFirst();
1145  ASSERT_EQ(IterStatus(iter), "a->va");
1146  iter->Next();
1147  ASSERT_EQ(IterStatus(iter), "(invalid)");
1148  iter->SeekToFirst();
1149  ASSERT_EQ(IterStatus(iter), "a->va");
1150  iter->Prev();
1151  ASSERT_EQ(IterStatus(iter), "(invalid)");
1152 
1153  iter->SeekToLast();
1154  ASSERT_EQ(IterStatus(iter), "a->va");
1155  iter->Next();
1156  ASSERT_EQ(IterStatus(iter), "(invalid)");
1157  iter->SeekToLast();
1158  ASSERT_EQ(IterStatus(iter), "a->va");
1159  iter->Prev();
1160  ASSERT_EQ(IterStatus(iter), "(invalid)");
1161 
1162  iter->Seek("");
1163  ASSERT_EQ(IterStatus(iter), "a->va");
1164  iter->Next();
1165  ASSERT_EQ(IterStatus(iter), "(invalid)");
1166 
1167  iter->Seek("a");
1168  ASSERT_EQ(IterStatus(iter), "a->va");
1169  iter->Next();
1170  ASSERT_EQ(IterStatus(iter), "(invalid)");
1171 
1172  iter->Seek("b");
1173  ASSERT_EQ(IterStatus(iter), "(invalid)");
1174 
1175  delete iter;
1176  } while (ChangeCompactOptions());
1177 }

Here is the call graph for this function:

rocksdb::TEST ( TableTest  ,
ApproximateOffsetOfCompressed   
)

Definition at line 1159 of file table_test.cc.

References Do_Compression_Test(), kSnappyCompression, kZlibCompression, SnappyCompressionSupported(), and ZlibCompressionSupported().

1159  {
1160  CompressionType compression_state[2];
1161  int valid = 0;
1162  if (!SnappyCompressionSupported()) {
1163  fprintf(stderr, "skipping snappy compression tests\n");
1164  } else {
1165  compression_state[valid] = kSnappyCompression;
1166  valid++;
1167  }
1168 
1169  if (!ZlibCompressionSupported()) {
1170  fprintf(stderr, "skipping zlib compression tests\n");
1171  } else {
1172  compression_state[valid] = kZlibCompression;
1173  valid++;
1174  }
1175 
1176  for(int i =0; i < valid; i++)
1177  {
1178  Do_Compression_Test(compression_state[i]);
1179  }
1180 
1181 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
IterMulti   
)

Definition at line 1179 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, db_, rocksdb::DBImpl::NewIterator(), rocksdb::Iterator::Next(), rocksdb::Iterator::Prev(), rocksdb::Iterator::Seek(), rocksdb::Iterator::SeekToFirst(), and rocksdb::Iterator::SeekToLast().

1179  {
1180  do {
1181  ASSERT_OK(Put("a", "va"));
1182  ASSERT_OK(Put("b", "vb"));
1183  ASSERT_OK(Put("c", "vc"));
1184  Iterator* iter = db_->NewIterator(ReadOptions());
1185 
1186  iter->SeekToFirst();
1187  ASSERT_EQ(IterStatus(iter), "a->va");
1188  iter->Next();
1189  ASSERT_EQ(IterStatus(iter), "b->vb");
1190  iter->Next();
1191  ASSERT_EQ(IterStatus(iter), "c->vc");
1192  iter->Next();
1193  ASSERT_EQ(IterStatus(iter), "(invalid)");
1194  iter->SeekToFirst();
1195  ASSERT_EQ(IterStatus(iter), "a->va");
1196  iter->Prev();
1197  ASSERT_EQ(IterStatus(iter), "(invalid)");
1198 
1199  iter->SeekToLast();
1200  ASSERT_EQ(IterStatus(iter), "c->vc");
1201  iter->Prev();
1202  ASSERT_EQ(IterStatus(iter), "b->vb");
1203  iter->Prev();
1204  ASSERT_EQ(IterStatus(iter), "a->va");
1205  iter->Prev();
1206  ASSERT_EQ(IterStatus(iter), "(invalid)");
1207  iter->SeekToLast();
1208  ASSERT_EQ(IterStatus(iter), "c->vc");
1209  iter->Next();
1210  ASSERT_EQ(IterStatus(iter), "(invalid)");
1211 
1212  iter->Seek("");
1213  ASSERT_EQ(IterStatus(iter), "a->va");
1214  iter->Seek("a");
1215  ASSERT_EQ(IterStatus(iter), "a->va");
1216  iter->Seek("ax");
1217  ASSERT_EQ(IterStatus(iter), "b->vb");
1218  iter->Seek("b");
1219  ASSERT_EQ(IterStatus(iter), "b->vb");
1220  iter->Seek("z");
1221  ASSERT_EQ(IterStatus(iter), "(invalid)");
1222 
1223  // Switch from reverse to forward
1224  iter->SeekToLast();
1225  iter->Prev();
1226  iter->Prev();
1227  iter->Next();
1228  ASSERT_EQ(IterStatus(iter), "b->vb");
1229 
1230  // Switch from forward to reverse
1231  iter->SeekToFirst();
1232  iter->Next();
1233  iter->Next();
1234  iter->Prev();
1235  ASSERT_EQ(IterStatus(iter), "b->vb");
1236 
1237  // Make sure iter stays at snapshot
1238  ASSERT_OK(Put("a", "va2"));
1239  ASSERT_OK(Put("a2", "va3"));
1240  ASSERT_OK(Put("b", "vb2"));
1241  ASSERT_OK(Put("c", "vc2"));
1242  ASSERT_OK(Delete("b"));
1243  iter->SeekToFirst();
1244  ASSERT_EQ(IterStatus(iter), "a->va");
1245  iter->Next();
1246  ASSERT_EQ(IterStatus(iter), "b->vb");
1247  iter->Next();
1248  ASSERT_EQ(IterStatus(iter), "c->vc");
1249  iter->Next();
1250  ASSERT_EQ(IterStatus(iter), "(invalid)");
1251  iter->SeekToLast();
1252  ASSERT_EQ(IterStatus(iter), "c->vc");
1253  iter->Prev();
1254  ASSERT_EQ(IterStatus(iter), "b->vb");
1255  iter->Prev();
1256  ASSERT_EQ(IterStatus(iter), "a->va");
1257  iter->Prev();
1258  ASSERT_EQ(IterStatus(iter), "(invalid)");
1259 
1260  delete iter;
1261  } while (ChangeCompactOptions());
1262 }

Here is the call graph for this function:

rocksdb::TEST ( TableTest  ,
BlockCacheLeak   
)

Definition at line 1183 of file table_test.cc.

References rocksdb::Constructor::Add(), ASSERT_OK, ASSERT_TRUE, rocksdb::Options::block_cache, rocksdb::Options::block_size, BytewiseComparator(), rocksdb::Options::compression, rocksdb::Constructor::Finish(), kNoCompression, rocksdb::BlockBasedTableConstructor::NewIterator(), NewLRUCache(), rocksdb::BlockBasedTableConstructor::Reopen(), rocksdb::BlockBasedTableConstructor::table_reader(), and rocksdb::TableReader::TEST_KeyInCache().

1183  {
1184  // Check that when we reopen a table we don't lose access to blocks already
1185  // in the cache. This test checks whether the Table actually makes use of the
1186  // unique ID from the file.
1187 
1188  Options opt;
1189  opt.block_size = 1024;
1190  opt.compression = kNoCompression;
1191  opt.block_cache = NewLRUCache(16*1024*1024); // big enough so we don't ever
1192  // lose cached values.
1193 
1194  BlockBasedTableConstructor c(BytewiseComparator());
1195  c.Add("k01", "hello");
1196  c.Add("k02", "hello2");
1197  c.Add("k03", std::string(10000, 'x'));
1198  c.Add("k04", std::string(200000, 'x'));
1199  c.Add("k05", std::string(300000, 'x'));
1200  c.Add("k06", "hello3");
1201  c.Add("k07", std::string(100000, 'x'));
1202  std::vector<std::string> keys;
1203  KVMap kvmap;
1204  c.Finish(opt, &keys, &kvmap);
1205 
1206  unique_ptr<Iterator> iter(c.NewIterator());
1207  iter->SeekToFirst();
1208  while (iter->Valid()) {
1209  iter->key();
1210  iter->value();
1211  iter->Next();
1212  }
1213  ASSERT_OK(iter->status());
1214 
1215  ASSERT_OK(c.Reopen(opt));
1216  for (const std::string& key: keys) {
1217  ASSERT_TRUE(c.table_reader()->TEST_KeyInCache(ReadOptions(), key));
1218  }
1219 }

Here is the call graph for this function:

rocksdb::TEST ( Harness  ,
Randomized   
)

Definition at line 1221 of file table_test.cc.

References GenerateArgList(), rocksdb::test::RandomKey(), rocksdb::test::RandomSeed(), rocksdb::test::RandomString(), and rocksdb::Random::Skewed().

1221  {
1222  std::vector<TestArgs> args = GenerateArgList();
1223  for (unsigned int i = 0; i < args.size(); i++) {
1224  Init(args[i]);
1225  Random rnd(test::RandomSeed() + 5);
1226  for (int num_entries = 0; num_entries < 2000;
1227  num_entries += (num_entries < 50 ? 1 : 200)) {
1228  if ((num_entries % 10) == 0) {
1229  fprintf(stderr, "case %d of %d: num_entries = %d\n",
1230  (i + 1), int(args.size()), num_entries);
1231  }
1232  for (int e = 0; e < num_entries; e++) {
1233  std::string v;
1234  Add(test::RandomKey(&rnd, rnd.Skewed(4)),
1235  test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
1236  }
1237  Test(&rnd);
1238  }
1239  }
1240 }

Here is the call graph for this function:

rocksdb::TEST ( Harness  ,
RandomizedLongDB   
)

Definition at line 1242 of file table_test.cc.

References ASSERT_GT, ASSERT_TRUE, db, DB_TEST, kNoCompression, name, rocksdb::DBImpl::NumberLevels(), rocksdb::test::RandomKey(), rocksdb::test::RandomSeed(), rocksdb::test::RandomString(), rocksdb::Random::Skewed(), and value.

1242  {
1243  Random rnd(test::RandomSeed());
1244  TestArgs args = { DB_TEST, false, 16, kNoCompression };
1245  Init(args);
1246  int num_entries = 100000;
1247  for (int e = 0; e < num_entries; e++) {
1248  std::string v;
1249  Add(test::RandomKey(&rnd, rnd.Skewed(4)),
1250  test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
1251  }
1252  Test(&rnd);
1253 
1254  // We must have created enough data to force merging
1255  int files = 0;
1256  for (int level = 0; level < db()->NumberLevels(); level++) {
1257  std::string value;
1258  char name[100];
1259  snprintf(name, sizeof(name), "rocksdb.num-files-at-level%d", level);
1260  ASSERT_TRUE(db()->GetProperty(name, &value));
1261  files += atoi(value.c_str());
1262  }
1263  ASSERT_GT(files, 0);
1264 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
IterReseek   
)

Definition at line 1266 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Options::create_if_missing, CreateDBStatistics(), db_, rocksdb::Options::max_sequential_skip_in_iterations, rocksdb::DBImpl::NewIterator(), rocksdb::Iterator::Next(), NUMBER_OF_RESEEKS_IN_ITERATION, rocksdb::Iterator::Prev(), rocksdb::Iterator::SeekToFirst(), rocksdb::Iterator::SeekToLast(), and rocksdb::Options::statistics.

1266  {
1267  Options options = CurrentOptions();
1268  options.max_sequential_skip_in_iterations = 3;
1269  options.create_if_missing = true;
1270  options.statistics = rocksdb::CreateDBStatistics();
1271  DestroyAndReopen(&options);
1272 
1273  // insert two keys with same userkey and verify that
1274  // reseek is not invoked. For each of these test cases,
1275  // verify that we can find the next key "b".
1276  ASSERT_OK(Put("a", "one"));
1277  ASSERT_OK(Put("a", "two"));
1278  ASSERT_OK(Put("b", "bone"));
1279  Iterator* iter = db_->NewIterator(ReadOptions());
1280  iter->SeekToFirst();
1281  ASSERT_EQ(options.statistics.get()->getTickerCount(
1283  ASSERT_EQ(IterStatus(iter), "a->two");
1284  iter->Next();
1285  ASSERT_EQ(options.statistics.get()->getTickerCount(
1287  ASSERT_EQ(IterStatus(iter), "b->bone");
1288  delete iter;
1289 
1290  // insert a total of three keys with same userkey and verify
1291  // that reseek is still not invoked.
1292  ASSERT_OK(Put("a", "three"));
1293  iter = db_->NewIterator(ReadOptions());
1294  iter->SeekToFirst();
1295  ASSERT_EQ(IterStatus(iter), "a->three");
1296  iter->Next();
1297  ASSERT_EQ(options.statistics.get()->getTickerCount(
1299  ASSERT_EQ(IterStatus(iter), "b->bone");
1300  delete iter;
1301 
1302  // insert a total of four keys with same userkey and verify
1303  // that reseek is invoked.
1304  ASSERT_OK(Put("a", "four"));
1305  iter = db_->NewIterator(ReadOptions());
1306  iter->SeekToFirst();
1307  ASSERT_EQ(IterStatus(iter), "a->four");
1308  ASSERT_EQ(options.statistics.get()->getTickerCount(
1310  iter->Next();
1311  ASSERT_EQ(options.statistics.get()->getTickerCount(
1313  ASSERT_EQ(IterStatus(iter), "b->bone");
1314  delete iter;
1315 
1316  // Testing reverse iterator
1317  // At this point, we have three versions of "a" and one version of "b".
1318  // The reseek statistics is already at 1.
1319  int num_reseeks = (int)options.statistics.get()->getTickerCount(
1321 
1322  // Insert another version of b and assert that reseek is not invoked
1323  ASSERT_OK(Put("b", "btwo"));
1324  iter = db_->NewIterator(ReadOptions());
1325  iter->SeekToLast();
1326  ASSERT_EQ(IterStatus(iter), "b->btwo");
1327  ASSERT_EQ(options.statistics.get()->getTickerCount(
1328  NUMBER_OF_RESEEKS_IN_ITERATION), num_reseeks);
1329  iter->Prev();
1330  ASSERT_EQ(options.statistics.get()->getTickerCount(
1331  NUMBER_OF_RESEEKS_IN_ITERATION), num_reseeks+1);
1332  ASSERT_EQ(IterStatus(iter), "a->four");
1333  delete iter;
1334 
1335  // insert two more versions of b. This makes a total of 4 versions
1336  // of b and 4 versions of a.
1337  ASSERT_OK(Put("b", "bthree"));
1338  ASSERT_OK(Put("b", "bfour"));
1339  iter = db_->NewIterator(ReadOptions());
1340  iter->SeekToLast();
1341  ASSERT_EQ(IterStatus(iter), "b->bfour");
1342  ASSERT_EQ(options.statistics.get()->getTickerCount(
1343  NUMBER_OF_RESEEKS_IN_ITERATION), num_reseeks + 2);
1344  iter->Prev();
1345 
1346  // the previous Prev call should have invoked reseek
1347  ASSERT_EQ(options.statistics.get()->getTickerCount(
1348  NUMBER_OF_RESEEKS_IN_ITERATION), num_reseeks + 3);
1349  ASSERT_EQ(IterStatus(iter), "a->four");
1350  delete iter;
1351 }

Here is the call graph for this function:

rocksdb::TEST ( MemTableTest  ,
Simple   
)

Definition at line 1268 of file table_test.cc.

References ASSERT_TRUE, BytewiseComparator(), rocksdb::WriteBatchInternal::InsertInto(), rocksdb::Iterator::key(), rocksdb::Iterator::Next(), ripple::Resource::ok, rocksdb::WriteBatch::Put(), rocksdb::Iterator::SeekToFirst(), rocksdb::WriteBatchInternal::SetSequence(), rocksdb::Slice::ToString(), rocksdb::Iterator::Valid(), and rocksdb::Iterator::value().

1268  {
1269  InternalKeyComparator cmp(BytewiseComparator());
1270  auto table_factory = std::make_shared<SkipListFactory>();
1271  MemTable* memtable = new MemTable(cmp, table_factory);
1272  memtable->Ref();
1273  WriteBatch batch;
1274  Options options;
1275  WriteBatchInternal::SetSequence(&batch, 100);
1276  batch.Put(std::string("k1"), std::string("v1"));
1277  batch.Put(std::string("k2"), std::string("v2"));
1278  batch.Put(std::string("k3"), std::string("v3"));
1279  batch.Put(std::string("largekey"), std::string("vlarge"));
1280  ASSERT_TRUE(WriteBatchInternal::InsertInto(&batch, memtable, &options).ok());
1281 
1282  Iterator* iter = memtable->NewIterator();
1283  iter->SeekToFirst();
1284  while (iter->Valid()) {
1285  fprintf(stderr, "key: '%s' -> '%s'\n",
1286  iter->key().ToString().c_str(),
1287  iter->value().ToString().c_str());
1288  iter->Next();
1289  }
1290 
1291  delete iter;
1292  memtable->Unref();
1293 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
IterSmallAndLargeMix   
)

Definition at line 1353 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, db_, rocksdb::DBImpl::NewIterator(), rocksdb::Iterator::Next(), rocksdb::Iterator::Prev(), rocksdb::Iterator::SeekToFirst(), and rocksdb::Iterator::SeekToLast().

1353  {
1354  do {
1355  ASSERT_OK(Put("a", "va"));
1356  ASSERT_OK(Put("b", std::string(100000, 'b')));
1357  ASSERT_OK(Put("c", "vc"));
1358  ASSERT_OK(Put("d", std::string(100000, 'd')));
1359  ASSERT_OK(Put("e", std::string(100000, 'e')));
1360 
1361  Iterator* iter = db_->NewIterator(ReadOptions());
1362 
1363  iter->SeekToFirst();
1364  ASSERT_EQ(IterStatus(iter), "a->va");
1365  iter->Next();
1366  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
1367  iter->Next();
1368  ASSERT_EQ(IterStatus(iter), "c->vc");
1369  iter->Next();
1370  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
1371  iter->Next();
1372  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
1373  iter->Next();
1374  ASSERT_EQ(IterStatus(iter), "(invalid)");
1375 
1376  iter->SeekToLast();
1377  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
1378  iter->Prev();
1379  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
1380  iter->Prev();
1381  ASSERT_EQ(IterStatus(iter), "c->vc");
1382  iter->Prev();
1383  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
1384  iter->Prev();
1385  ASSERT_EQ(IterStatus(iter), "a->va");
1386  iter->Prev();
1387  ASSERT_EQ(IterStatus(iter), "(invalid)");
1388 
1389  delete iter;
1390  } while (ChangeCompactOptions());
1391 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
IterMultiWithDelete   
)

Definition at line 1393 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, db_, merge_operator, rocksdb::DBImpl::NewIterator(), rocksdb::Iterator::Prev(), and rocksdb::Iterator::Seek().

1393  {
1394  do {
1395  ASSERT_OK(Put("a", "va"));
1396  ASSERT_OK(Put("b", "vb"));
1397  ASSERT_OK(Put("c", "vc"));
1398  ASSERT_OK(Delete("b"));
1399  ASSERT_EQ("NOT_FOUND", Get("b"));
1400 
1401  Iterator* iter = db_->NewIterator(ReadOptions());
1402  iter->Seek("c");
1403  ASSERT_EQ(IterStatus(iter), "c->vc");
1404  if (!CurrentOptions().merge_operator) {
1405  // TODO: merge operator does not support backward iteration yet
1406  iter->Prev();
1407  ASSERT_EQ(IterStatus(iter), "a->va");
1408  }
1409  delete iter;
1410  } while (ChangeOptions());
1411 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
IterPrevMaxSkip   
)

Definition at line 1413 of file db_test.cc.

References ASSERT_OK, db_, rocksdb::DBImpl::Delete(), and rocksdb::DBImpl::Put().

1413  {
1414  do {
1415  for (int i = 0; i < 2; i++) {
1416  db_->Put(WriteOptions(), "key1", "v1");
1417  db_->Put(WriteOptions(), "key2", "v2");
1418  db_->Put(WriteOptions(), "key3", "v3");
1419  db_->Put(WriteOptions(), "key4", "v4");
1420  db_->Put(WriteOptions(), "key5", "v5");
1421  }
1422 
1423  VerifyIterLast("key5->v5");
1424 
1425  ASSERT_OK(db_->Delete(WriteOptions(), "key5"));
1426  VerifyIterLast("key4->v4");
1427 
1428  ASSERT_OK(db_->Delete(WriteOptions(), "key4"));
1429  VerifyIterLast("key3->v3");
1430 
1431  ASSERT_OK(db_->Delete(WriteOptions(), "key3"));
1432  VerifyIterLast("key2->v2");
1433 
1434  ASSERT_OK(db_->Delete(WriteOptions(), "key2"));
1435  VerifyIterLast("key1->v1");
1436 
1437  ASSERT_OK(db_->Delete(WriteOptions(), "key1"));
1438  VerifyIterLast("(invalid)");
1439  } while (ChangeOptions(kSkipMergePut));
1440 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
IterWithSnapshot   
)

Definition at line 1442 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, db_, rocksdb::DBImpl::GetSnapshot(), merge_operator, rocksdb::DBImpl::NewIterator(), rocksdb::Iterator::Next(), rocksdb::Iterator::Prev(), rocksdb::DBImpl::ReleaseSnapshot(), rocksdb::Iterator::Seek(), rocksdb::ReadOptions::snapshot, and rocksdb::Iterator::Valid().

1442  {
1443  do {
1444  ASSERT_OK(Put("key1", "val1"));
1445  ASSERT_OK(Put("key2", "val2"));
1446  ASSERT_OK(Put("key3", "val3"));
1447  ASSERT_OK(Put("key4", "val4"));
1448  ASSERT_OK(Put("key5", "val5"));
1449 
1450  const Snapshot *snapshot = db_->GetSnapshot();
1451  ReadOptions options;
1452  options.snapshot = snapshot;
1453  Iterator* iter = db_->NewIterator(options);
1454 
1455  // Put more values after the snapshot
1456  ASSERT_OK(Put("key100", "val100"));
1457  ASSERT_OK(Put("key101", "val101"));
1458 
1459  iter->Seek("key5");
1460  ASSERT_EQ(IterStatus(iter), "key5->val5");
1461  if (!CurrentOptions().merge_operator) {
1462  // TODO: merge operator does not support backward iteration yet
1463  iter->Prev();
1464  ASSERT_EQ(IterStatus(iter), "key4->val4");
1465  iter->Prev();
1466  ASSERT_EQ(IterStatus(iter), "key3->val3");
1467 
1468  iter->Next();
1469  ASSERT_EQ(IterStatus(iter), "key4->val4");
1470  iter->Next();
1471  ASSERT_EQ(IterStatus(iter), "key5->val5");
1472  iter->Next();
1473  ASSERT_TRUE(!iter->Valid());
1474  }
1475  db_->ReleaseSnapshot(snapshot);
1476  delete iter;
1477  } while (ChangeOptions());
1478 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
Recover   
)

Definition at line 1480 of file db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

1480  {
1481  do {
1482  ASSERT_OK(Put("foo", "v1"));
1483  ASSERT_OK(Put("baz", "v5"));
1484 
1485  Reopen();
1486  ASSERT_EQ("v1", Get("foo"));
1487 
1488  ASSERT_EQ("v1", Get("foo"));
1489  ASSERT_EQ("v5", Get("baz"));
1490  ASSERT_OK(Put("bar", "v2"));
1491  ASSERT_OK(Put("foo", "v3"));
1492 
1493  Reopen();
1494  ASSERT_EQ("v3", Get("foo"));
1495  ASSERT_OK(Put("foo", "v4"));
1496  ASSERT_EQ("v4", Get("foo"));
1497  ASSERT_EQ("v2", Get("bar"));
1498  ASSERT_EQ("v5", Get("baz"));
1499  } while (ChangeOptions());
1500 }
rocksdb::TEST ( DBTest  ,
RollLog   
)

Definition at line 1502 of file db_test.cc.

References ASSERT_OK.

1502  {
1503  do {
1504  ASSERT_OK(Put("foo", "v1"));
1505  ASSERT_OK(Put("baz", "v5"));
1506 
1507  Reopen();
1508  for (int i = 0; i < 10; i++) {
1509  Reopen();
1510  }
1511  ASSERT_OK(Put("foo", "v4"));
1512  for (int i = 0; i < 10; i++) {
1513  Reopen();
1514  }
1515  } while (ChangeOptions());
1516 }
rocksdb::TEST ( DBTest  ,
WAL   
)

Definition at line 1518 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, and rocksdb::WriteOptions::disableWAL.

1518  {
1519  do {
1520  Options options = CurrentOptions();
1521  WriteOptions writeOpt = WriteOptions();
1522  writeOpt.disableWAL = true;
1523  ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v1"));
1524  ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v1"));
1525 
1526  Reopen();
1527  ASSERT_EQ("v1", Get("foo"));
1528  ASSERT_EQ("v1", Get("bar"));
1529 
1530  writeOpt.disableWAL = false;
1531  ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v2"));
1532  writeOpt.disableWAL = true;
1533  ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v2"));
1534 
1535  Reopen();
1536  // Both values should be present.
1537  ASSERT_EQ("v2", Get("bar"));
1538  ASSERT_EQ("v2", Get("foo"));
1539 
1540  writeOpt.disableWAL = true;
1541  ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v3"));
1542  writeOpt.disableWAL = false;
1543  ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v3"));
1544 
1545  Reopen();
1546  // again both values should be present.
1547  ASSERT_EQ("v3", Get("foo"));
1548  ASSERT_EQ("v3", Get("bar"));
1549  } while (ChangeCompactOptions());
1550 }
rocksdb::TEST ( DBTest  ,
CheckLock   
)

Definition at line 1552 of file db_test.cc.

References ASSERT_OK, ASSERT_TRUE, and ripple::Resource::ok.

1552  {
1553  do {
1554  DB* localdb;
1555  Options options = CurrentOptions();
1556  ASSERT_OK(TryReopen(&options));
1557 
1558  // second open should fail
1559  ASSERT_TRUE(!(PureReopen(&options, &localdb)).ok());
1560  } while (ChangeCompactOptions());
1561 }
rocksdb::TEST ( DBTest  ,
FlushMultipleMemtable   
)

Definition at line 1563 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::WriteOptions::disableWAL, rocksdb::Options::max_write_buffer_number, and rocksdb::Options::min_write_buffer_number_to_merge.

1563  {
1564  do {
1565  Options options = CurrentOptions();
1566  WriteOptions writeOpt = WriteOptions();
1567  writeOpt.disableWAL = true;
1568  options.max_write_buffer_number = 4;
1569  options.min_write_buffer_number_to_merge = 3;
1570  Reopen(&options);
1571  ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v1"));
1572  dbfull()->Flush(FlushOptions());
1573  ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v1"));
1574 
1575  ASSERT_EQ("v1", Get("foo"));
1576  ASSERT_EQ("v1", Get("bar"));
1577  dbfull()->Flush(FlushOptions());
1578  } while (ChangeCompactOptions());
1579 }
rocksdb::TEST ( DBTest  ,
NumImmutableMemTable   
)

Definition at line 1581 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, rocksdb::WriteOptions::disableWAL, rocksdb::Options::max_write_buffer_number, rocksdb::Options::min_write_buffer_number_to_merge, and rocksdb::Options::write_buffer_size.

1581  {
1582  do {
1583  Options options = CurrentOptions();
1584  WriteOptions writeOpt = WriteOptions();
1585  writeOpt.disableWAL = true;
1586  options.max_write_buffer_number = 4;
1587  options.min_write_buffer_number_to_merge = 3;
1588  options.write_buffer_size = 1000000;
1589  Reopen(&options);
1590 
1591  std::string big_value(1000000, 'x');
1592  std::string num;
1593 
1594  ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
1595  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
1596  ASSERT_EQ(num, "0");
1597 
1598  ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
1599  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
1600  ASSERT_EQ(num, "1");
1601 
1602  ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
1603  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
1604  ASSERT_EQ(num, "2");
1605 
1606  dbfull()->Flush(FlushOptions());
1607  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
1608  ASSERT_EQ(num, "0");
1609  } while (ChangeCompactOptions());
1610 }
rocksdb::TEST ( DBTest  ,
FLUSH   
)

Definition at line 1612 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, and rocksdb::WriteOptions::disableWAL.

1612  {
1613  do {
1614  Options options = CurrentOptions();
1615  WriteOptions writeOpt = WriteOptions();
1616  writeOpt.disableWAL = true;
1617  ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v1"));
1618  // this will now also flush the last 2 writes
1619  dbfull()->Flush(FlushOptions());
1620  ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v1"));
1621 
1622  Reopen();
1623  ASSERT_EQ("v1", Get("foo"));
1624  ASSERT_EQ("v1", Get("bar"));
1625 
1626  writeOpt.disableWAL = true;
1627  ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v2"));
1628  ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v2"));
1629  dbfull()->Flush(FlushOptions());
1630 
1631  Reopen();
1632  ASSERT_EQ("v2", Get("bar"));
1633  ASSERT_EQ("v2", Get("foo"));
1634 
1635  writeOpt.disableWAL = false;
1636  ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v3"));
1637  ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v3"));
1638  dbfull()->Flush(FlushOptions());
1639 
1640  Reopen();
1641  // 'foo' should be there because its put
1642  // has WAL enabled.
1643  ASSERT_EQ("v3", Get("foo"));
1644  ASSERT_EQ("v3", Get("bar"));
1645  } while (ChangeCompactOptions());
1646 }
rocksdb::TEST ( DBTest  ,
RecoveryWithEmptyLog   
)

Definition at line 1648 of file db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

1648  {
1649  do {
1650  ASSERT_OK(Put("foo", "v1"));
1651  ASSERT_OK(Put("foo", "v2"));
1652  Reopen();
1653  Reopen();
1654  ASSERT_OK(Put("foo", "v3"));
1655  Reopen();
1656  ASSERT_EQ("v3", Get("foo"));
1657  } while (ChangeOptions());
1658 }
rocksdb::TEST ( DBTest  ,
RecoverDuringMemtableCompaction   
)

Definition at line 1662 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Options::env, env_, and rocksdb::Options::write_buffer_size.

1662  {
1663  do {
1664  Options options = CurrentOptions();
1665  options.env = env_;
1666  options.write_buffer_size = 1000000;
1667  Reopen(&options);
1668 
1669  // Trigger a long memtable compaction and reopen the database during it
1670  ASSERT_OK(Put("foo", "v1")); // Goes to 1st log file
1671  ASSERT_OK(Put("big1", std::string(10000000, 'x'))); // Fills memtable
1672  ASSERT_OK(Put("big2", std::string(1000, 'y'))); // Triggers compaction
1673  ASSERT_OK(Put("bar", "v2")); // Goes to new log file
1674 
1675  Reopen(&options);
1676  ASSERT_EQ("v1", Get("foo"));
1677  ASSERT_EQ("v2", Get("bar"));
1678  ASSERT_EQ(std::string(10000000, 'x'), Get("big1"));
1679  ASSERT_EQ(std::string(1000, 'y'), Get("big2"));
1680  } while (ChangeOptions());
1681 }
rocksdb::TEST ( DBTest  ,
MinorCompactionsHappen   
)

Definition at line 1683 of file db_test.cc.

References ASSERT_EQ, ASSERT_GT, ASSERT_OK, and rocksdb::Options::write_buffer_size.

1683  {
1684  do {
1685  Options options = CurrentOptions();
1686  options.write_buffer_size = 10000;
1687  Reopen(&options);
1688 
1689  const int N = 500;
1690 
1691  int starting_num_tables = TotalTableFiles();
1692  for (int i = 0; i < N; i++) {
1693  ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
1694  }
1695  int ending_num_tables = TotalTableFiles();
1696  ASSERT_GT(ending_num_tables, starting_num_tables);
1697 
1698  for (int i = 0; i < N; i++) {
1699  ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
1700  }
1701 
1702  Reopen();
1703 
1704  for (int i = 0; i < N; i++) {
1705  ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
1706  }
1707  } while (ChangeCompactOptions());
1708 }
rocksdb::TEST ( DBTest  ,
ManifestRollOver   
)

Definition at line 1710 of file db_test.cc.

References ASSERT_EQ, ASSERT_GT, ASSERT_OK, and rocksdb::Options::max_manifest_file_size.

1710  {
1711  do {
1712  Options options = CurrentOptions();
1713  options.max_manifest_file_size = 10 ; // 10 bytes
1714  Reopen(&options);
1715  {
1716  ASSERT_OK(Put("manifest_key1", std::string(1000, '1')));
1717  ASSERT_OK(Put("manifest_key2", std::string(1000, '2')));
1718  ASSERT_OK(Put("manifest_key3", std::string(1000, '3')));
1719  uint64_t manifest_before_flush =
1720  dbfull()->TEST_Current_Manifest_FileNo();
1721  dbfull()->Flush(FlushOptions()); // This should trigger LogAndApply.
1722  uint64_t manifest_after_flush =
1723  dbfull()->TEST_Current_Manifest_FileNo();
1724  ASSERT_GT(manifest_after_flush, manifest_before_flush);
1725  Reopen(&options);
1726  ASSERT_GT(dbfull()->TEST_Current_Manifest_FileNo(),
1727  manifest_after_flush);
1728  // check if a new manifest file got inserted or not.
1729  ASSERT_EQ(std::string(1000, '1'), Get("manifest_key1"));
1730  ASSERT_EQ(std::string(1000, '2'), Get("manifest_key2"));
1731  ASSERT_EQ(std::string(1000, '3'), Get("manifest_key3"));
1732  }
1733  } while (ChangeCompactOptions());
1734 }
rocksdb::TEST ( DBTest  ,
IdentityAcrossRestarts   
)

Definition at line 1736 of file db_test.cc.

References ASSERT_EQ, ASSERT_NE, ASSERT_OK, dbname_, rocksdb::Env::DeleteFile(), env_, IdentityFileName(), rocksdb::Env::NewSequentialFile(), rocksdb::SequentialFile::Read(), and rocksdb::Slice::ToString().

1736  {
1737  do {
1738  std::string idfilename = IdentityFileName(dbname_);
1739  unique_ptr<SequentialFile> idfile;
1740  const EnvOptions soptions;
1741  ASSERT_OK(env_->NewSequentialFile(idfilename, &idfile, soptions));
1742  char buffer1[100];
1743  Slice id1;
1744  ASSERT_OK(idfile->Read(100, &id1, buffer1));
1745 
1746  Options options = CurrentOptions();
1747  Reopen(&options);
1748  char buffer2[100];
1749  Slice id2;
1750  ASSERT_OK(env_->NewSequentialFile(idfilename, &idfile, soptions));
1751  ASSERT_OK(idfile->Read(100, &id2, buffer2));
1752  // id1 should match id2 because identity was not regenerated
1753  ASSERT_EQ(id1.ToString(), id2.ToString());
1754 
1755  ASSERT_OK(env_->DeleteFile(idfilename));
1756  Reopen(&options);
1757  char buffer3[100];
1758  Slice id3;
1759  ASSERT_OK(env_->NewSequentialFile(idfilename, &idfile, soptions));
1760  ASSERT_OK(idfile->Read(100, &id3, buffer3));
1761  // id1 should NOT match id3 because identity was regenerated
1762  ASSERT_NE(id1.ToString(0), id3.ToString());
1763  } while (ChangeCompactOptions());
1764 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
RecoverWithLargeLog   
)

Definition at line 1766 of file db_test.cc.

References ASSERT_EQ, ASSERT_GT, ASSERT_OK, and rocksdb::Options::write_buffer_size.

1766  {
1767  do {
1768  {
1769  Options options = CurrentOptions();
1770  Reopen(&options);
1771  ASSERT_OK(Put("big1", std::string(200000, '1')));
1772  ASSERT_OK(Put("big2", std::string(200000, '2')));
1773  ASSERT_OK(Put("small3", std::string(10, '3')));
1774  ASSERT_OK(Put("small4", std::string(10, '4')));
1775  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
1776  }
1777 
1778  // Make sure that if we re-open with a small write buffer size that
1779  // we flush table files in the middle of a large log file.
1780  Options options = CurrentOptions();
1781  options.write_buffer_size = 100000;
1782  Reopen(&options);
1783  ASSERT_EQ(NumTableFilesAtLevel(0), 3);
1784  ASSERT_EQ(std::string(200000, '1'), Get("big1"));
1785  ASSERT_EQ(std::string(200000, '2'), Get("big2"));
1786  ASSERT_EQ(std::string(10, '3'), Get("small3"));
1787  ASSERT_EQ(std::string(10, '4'), Get("small4"));
1788  ASSERT_GT(NumTableFilesAtLevel(0), 1);
1789  } while (ChangeCompactOptions());
1790 }
rocksdb::TEST ( DBTest  ,
CompactionsGenerateMultipleFiles   
)

Definition at line 1792 of file db_test.cc.

References ASSERT_EQ, ASSERT_GT, ASSERT_OK, RandomString(), and rocksdb::Options::write_buffer_size.

1792  {
1793  Options options = CurrentOptions();
1794  options.write_buffer_size = 100000000; // Large write buffer
1795  Reopen(&options);
1796 
1797  Random rnd(301);
1798 
1799  // Write 8MB (80 values, each 100K)
1800  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
1801  std::vector<std::string> values;
1802  for (int i = 0; i < 80; i++) {
1803  values.push_back(RandomString(&rnd, 100000));
1804  ASSERT_OK(Put(Key(i), values[i]));
1805  }
1806 
1807  // Reopening moves updates to level-0
1808  Reopen(&options);
1809  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
1810 
1811  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
1812  ASSERT_GT(NumTableFilesAtLevel(1), 1);
1813  for (int i = 0; i < 80; i++) {
1814  ASSERT_EQ(Get(Key(i)), values[i]);
1815  }
1816 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
CompactionTrigger   
)

Definition at line 1906 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Options::level0_file_num_compaction_trigger, rocksdb::Options::max_mem_compaction_level, rocksdb::Options::num_levels, RandomString(), and rocksdb::Options::write_buffer_size.

1906  {
1907  Options options = CurrentOptions();
1908  options.write_buffer_size = 100<<10; //100KB
1909  options.num_levels = 3;
1910  options.max_mem_compaction_level = 0;
1911  options.level0_file_num_compaction_trigger = 3;
1912  Reopen(&options);
1913 
1914  Random rnd(301);
1915 
1916  for (int num = 0;
1917  num < options.level0_file_num_compaction_trigger - 1;
1918  num++) {
1919  std::vector<std::string> values;
1920  // Write 120KB (12 values, each 10K)
1921  for (int i = 0; i < 12; i++) {
1922  values.push_back(RandomString(&rnd, 10000));
1923  ASSERT_OK(Put(Key(i), values[i]));
1924  }
1925  dbfull()->TEST_WaitForFlushMemTable();
1926  ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
1927  }
1928 
1929  //generate one more file in level-0, and should trigger level-0 compaction
1930  std::vector<std::string> values;
1931  for (int i = 0; i < 12; i++) {
1932  values.push_back(RandomString(&rnd, 10000));
1933  ASSERT_OK(Put(Key(i), values[i]));
1934  }
1935  dbfull()->TEST_WaitForCompact();
1936 
1937  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
1938  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
1939 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
UniversalCompactionTrigger   
)

Definition at line 1941 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Options::compaction_style, kCompactionStyleUniversal, rocksdb::Options::level0_file_num_compaction_trigger, rocksdb::Options::num_levels, RandomString(), and rocksdb::Options::write_buffer_size.

1941  {
1942  Options options = CurrentOptions();
1943  options.compaction_style = kCompactionStyleUniversal;
1944  options.write_buffer_size = 100<<10; //100KB
1945  // trigger compaction if there are >= 4 files
1946  options.level0_file_num_compaction_trigger = 4;
1947  Reopen(&options);
1948 
1949  Random rnd(301);
1950  int key_idx = 0;
1951 
1952  // Stage 1:
1953  // Generate a set of files at level 0, but don't trigger level-0
1954  // compaction.
1955  for (int num = 0;
1956  num < options.level0_file_num_compaction_trigger-1;
1957  num++) {
1958  // Write 120KB (12 values, each 10K)
1959  for (int i = 0; i < 12; i++) {
1960  ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
1961  key_idx++;
1962  }
1963  dbfull()->TEST_WaitForFlushMemTable();
1964  ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
1965  }
1966 
1967  // Generate one more file at level-0, which should trigger level-0
1968  // compaction.
1969  for (int i = 0; i < 12; i++) {
1970  ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
1971  key_idx++;
1972  }
1973  dbfull()->TEST_WaitForCompact();
1974  // Suppose each file flushed from mem table has size 1. Now we compact
1975  // (level0_file_num_compaction_trigger+1)=4 files and should have a big
1976  // file of size 4.
1977  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
1978  for (int i = 1; i < options.num_levels ; i++) {
1979  ASSERT_EQ(NumTableFilesAtLevel(i), 0);
1980  }
1981 
1982  // Stage 2:
1983  // Now we have one file at level 0, with size 4. We also have some data in
1984  // mem table. Let's continue generating new files at level 0, but don't
1985  // trigger level-0 compaction.
1986  // First, clean up memtable before inserting new data. This will generate
1987  // a level-0 file, with size around 0.4 (according to previously written
1988  // data amount).
1989  dbfull()->Flush(FlushOptions());
1990  for (int num = 0;
1991  num < options.level0_file_num_compaction_trigger-3;
1992  num++) {
1993  // Write 120KB (12 values, each 10K)
1994  for (int i = 0; i < 12; i++) {
1995  ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
1996  key_idx++;
1997  }
1998  dbfull()->TEST_WaitForFlushMemTable();
1999  ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
2000  }
2001 
2002  // Generate one more file at level-0, which should trigger level-0
2003  // compaction.
2004  for (int i = 0; i < 12; i++) {
2005  ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
2006  key_idx++;
2007  }
2008  dbfull()->TEST_WaitForCompact();
2009  // Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
2010  // After compaction, we should have 2 files, with size 4, 2.4.
2011  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
2012  for (int i = 1; i < options.num_levels ; i++) {
2013  ASSERT_EQ(NumTableFilesAtLevel(i), 0);
2014  }
2015 
2016  // Stage 3:
2017  // Now we have 2 files at level 0, with size 4 and 2.4. Continue
2018  // generating new files at level 0.
2019  for (int num = 0;
2020  num < options.level0_file_num_compaction_trigger-3;
2021  num++) {
2022  // Write 120KB (12 values, each 10K)
2023  for (int i = 0; i < 12; i++) {
2024  ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
2025  key_idx++;
2026  }
2027  dbfull()->TEST_WaitForFlushMemTable();
2028  ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
2029  }
2030 
2031  // Generate one more file at level-0, which should trigger level-0
2032  // compaction.
2033  for (int i = 0; i < 12; i++) {
2034  ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
2035  key_idx++;
2036  }
2037  dbfull()->TEST_WaitForCompact();
2038  // Before compaction, we have 4 files at level 0, with size 4, 2.4, 1, 1.
2039  // After compaction, we should have 3 files, with size 4, 2.4, 2.
2040  ASSERT_EQ(NumTableFilesAtLevel(0), 3);
2041  for (int i = 1; i < options.num_levels ; i++) {
2042  ASSERT_EQ(NumTableFilesAtLevel(i), 0);
2043  }
2044 
2045  // Stage 4:
2046  // Now we have 3 files at level 0, with size 4, 2.4, 2. Let's generate a
2047  // new file of size 1.
2048  for (int i = 0; i < 12; i++) {
2049  ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
2050  key_idx++;
2051  }
2052  dbfull()->TEST_WaitForCompact();
2053  // Level-0 compaction is triggered, but no file will be picked up.
2054  ASSERT_EQ(NumTableFilesAtLevel(0), 4);
2055  for (int i = 1; i < options.num_levels ; i++) {
2056  ASSERT_EQ(NumTableFilesAtLevel(i), 0);
2057  }
2058 
2059  // Stage 5:
2060  // Now we have 4 files at level 0, with size 4, 2.4, 2, 1. Let's generate
2061  // a new file of size 1.
2062  for (int i = 0; i < 12; i++) {
2063  ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
2064  key_idx++;
2065  }
2066  dbfull()->TEST_WaitForCompact();
2067  // All files at level 0 will be compacted into a single one.
2068  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
2069  for (int i = 1; i < options.num_levels ; i++) {
2070  ASSERT_EQ(NumTableFilesAtLevel(i), 0);
2071  }
2072 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
UniversalCompactionSizeAmplification   
)

Definition at line 2074 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Options::compaction_options_universal, rocksdb::Options::compaction_style, kCompactionStyleUniversal, rocksdb::Options::level0_file_num_compaction_trigger, RandomString(), and rocksdb::Options::write_buffer_size.

2074  {
2075  Options options = CurrentOptions();
2076  options.compaction_style = kCompactionStyleUniversal;
2077  options.write_buffer_size = 100<<10; //100KB
2078  options.level0_file_num_compaction_trigger = 3;
2079 
2080  // Trigger compaction if size amplification exceeds 110%
2081  options.compaction_options_universal.
2082  max_size_amplification_percent = 110;
2083  Reopen(&options);
2084 
2085  Random rnd(301);
2086  int key_idx = 0;
2087 
2088  // Generate two files in Level 0. Both files are approx the same size.
2089  for (int num = 0;
2090  num < options.level0_file_num_compaction_trigger-1;
2091  num++) {
2092  // Write 120KB (12 values, each 10K)
2093  for (int i = 0; i < 12; i++) {
2094  ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
2095  key_idx++;
2096  }
2097  dbfull()->TEST_WaitForFlushMemTable();
2098  ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
2099  }
2100  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
2101 
2102  // Flush whatever is remaining in memtable. This is typically
2103  // small, which should not trigger size ratio based compaction
2104  // but will instead trigger size amplification.
2105  dbfull()->Flush(FlushOptions());
2106 
2107  dbfull()->TEST_WaitForCompact();
2108 
2109  // Verify that size amplification did occur
2110  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
2111 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
UniversalCompactionOptions   
)

Definition at line 2113 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Options::compaction_options_universal, rocksdb::Options::compaction_style, rocksdb::CompactionOptionsUniversal::compression_size_percent, kCompactionStyleUniversal, rocksdb::Options::level0_file_num_compaction_trigger, rocksdb::Options::num_levels, RandomString(), and rocksdb::Options::write_buffer_size.

2113  {
2114  Options options = CurrentOptions();
2115  options.compaction_style = kCompactionStyleUniversal;
2116  options.write_buffer_size = 100<<10; //100KB
2117  options.level0_file_num_compaction_trigger = 4;
2118  options.num_levels = 1;
2119  options.compaction_options_universal.compression_size_percent = -1;
2120  Reopen(&options);
2121 
2122  Random rnd(301);
2123  int key_idx = 0;
2124 
2125  for (int num = 0;
2126  num < options.level0_file_num_compaction_trigger;
2127  num++) {
2128  // Write 120KB (12 values, each 10K)
2129  for (int i = 0; i < 12; i++) {
2130  ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
2131  key_idx++;
2132  }
2133  dbfull()->TEST_WaitForFlushMemTable();
2134 
2135  if (num < options.level0_file_num_compaction_trigger - 1) {
2136  ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
2137  }
2138  }
2139 
2140  dbfull()->TEST_WaitForCompact();
2141  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
2142  for (int i = 1; i < options.num_levels ; i++) {
2143  ASSERT_EQ(NumTableFilesAtLevel(i), 0);
2144  }
2145 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
ConvertCompactionStyle   
)

Definition at line 2248 of file db_test.cc.

References ASSERT_EQ, ASSERT_GT, ASSERT_OK, ASSERT_TRUE, rocksdb::Options::compaction_style, rocksdb::Status::IsInvalidArgument(), kCompactionStyleUniversal, rocksdb::Iterator::key(), rocksdb::Options::level0_file_num_compaction_trigger, rocksdb::Options::max_bytes_for_level_base, rocksdb::Options::max_bytes_for_level_multiplier, rocksdb::Iterator::Next(), rocksdb::Options::num_levels, RandomString(), rocksdb::Iterator::SeekToFirst(), rocksdb::Options::target_file_size_base, rocksdb::Options::target_file_size_multiplier, rocksdb::Slice::ToString(), rocksdb::Iterator::Valid(), and rocksdb::Options::write_buffer_size.

2248  {
2249  Random rnd(301);
2250  int max_key_level_insert = 200;
2251  int max_key_universal_insert = 600;
2252 
2253  // Stage 1: generate a db with level compaction
2254  Options options = CurrentOptions();
2255  options.write_buffer_size = 100<<10; //100KB
2256  options.num_levels = 4;
2257  options.level0_file_num_compaction_trigger = 3;
2258  options.max_bytes_for_level_base = 500<<10; // 500KB
2259  options.max_bytes_for_level_multiplier = 1;
2260  options.target_file_size_base = 200<<10; // 200KB
2261  options.target_file_size_multiplier = 1;
2262  Reopen(&options);
2263 
2264  for (int i = 0; i <= max_key_level_insert; i++) {
2265  // each value is 10K
2266  ASSERT_OK(Put(Key(i), RandomString(&rnd, 10000)));
2267  }
2268  dbfull()->Flush(FlushOptions());
2269  dbfull()->TEST_WaitForCompact();
2270 
2271  ASSERT_GT(TotalTableFiles(), 1);
2272  int non_level0_num_files = 0;
2273  for (int i = 1; i < dbfull()->NumberLevels(); i++) {
2274  non_level0_num_files += NumTableFilesAtLevel(i);
2275  }
2276  ASSERT_GT(non_level0_num_files, 0);
2277 
2278  // Stage 2: reopen with universal compaction - should fail
2279  options = CurrentOptions();
2280  options.compaction_style = kCompactionStyleUniversal;
2281  Status s = TryReopen(&options);
2282  ASSERT_TRUE(s.IsInvalidArgument());
2283 
2284  // Stage 3: compact into a single file and move the file to level 0
2285  options = CurrentOptions();
2286  options.disable_auto_compactions = true;
2287  options.target_file_size_base = INT_MAX;
2288  options.target_file_size_multiplier = 1;
2289  options.max_bytes_for_level_base = INT_MAX;
2290  options.max_bytes_for_level_multiplier = 1;
2291  Reopen(&options);
2292 
2293  dbfull()->CompactRange(nullptr, nullptr,
2294  true /* reduce level */,
2295  0 /* reduce to level 0 */);
2296 
2297  for (int i = 0; i < dbfull()->NumberLevels(); i++) {
2298  int num = NumTableFilesAtLevel(i);
2299  if (i == 0) {
2300  ASSERT_EQ(num, 1);
2301  } else {
2302  ASSERT_EQ(num, 0);
2303  }
2304  }
2305 
2306  // Stage 4: re-open in universal compaction style and do some db operations
2307  options = CurrentOptions();
2308  options.compaction_style = kCompactionStyleUniversal;
2309  options.write_buffer_size = 100<<10; //100KB
2310  options.level0_file_num_compaction_trigger = 3;
2311  Reopen(&options);
2312 
2313  for (int i = max_key_level_insert / 2; i <= max_key_universal_insert; i++) {
2314  ASSERT_OK(Put(Key(i), RandomString(&rnd, 10000)));
2315  }
2316  dbfull()->Flush(FlushOptions());
2317  dbfull()->TEST_WaitForCompact();
2318 
2319  for (int i = 1; i < dbfull()->NumberLevels(); i++) {
2320  ASSERT_EQ(NumTableFilesAtLevel(i), 0);
2321  }
2322 
2323  // verify keys inserted in both level compaction style and universal
2324  // compaction style
2325  std::string keys_in_db;
2326  Iterator* iter = dbfull()->NewIterator(ReadOptions());
2327  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
2328  keys_in_db.append(iter->key().ToString());
2329  keys_in_db.push_back(',');
2330  }
2331  delete iter;
2332 
2333  std::string expected_keys;
2334  for (int i = 0; i <= max_key_universal_insert; i++) {
2335  expected_keys.append(Key(i));
2336  expected_keys.push_back(',');
2337  }
2338 
2339  ASSERT_EQ(keys_in_db, expected_keys);
2340 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
MinLevelToCompress1   
)

Definition at line 2408 of file db_test.cc.

References rocksdb::Options::compression_per_level, kNoCompression, MinLevelHelper(), MinLevelToCompress(), and rocksdb::Options::num_levels.

2408  {
2409  Options options = CurrentOptions();
2410  CompressionType type;
2411  if (!MinLevelToCompress(type, options, -14, -1, 0)) {
2412  return;
2413  }
2414  Reopen(&options);
2415  MinLevelHelper(this, options);
2416 
2417  // do not compress L0 and L1
2418  for (int i = 0; i < 2; i++) {
2419  options.compression_per_level[i] = kNoCompression;
2420  }
2421  for (int i = 2; i < options.num_levels; i++) {
2422  options.compression_per_level[i] = type;
2423  }
2424  DestroyAndReopen(&options);
2425  MinLevelHelper(this, options);
2426 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
MinLevelToCompress2   
)

Definition at line 2428 of file db_test.cc.

References rocksdb::Options::compression_per_level, kNoCompression, MinLevelHelper(), MinLevelToCompress(), and rocksdb::Options::num_levels.

2428  {
2429  Options options = CurrentOptions();
2430  CompressionType type;
2431  if (!MinLevelToCompress(type, options, 15, -1, 0)) {
2432  return;
2433  }
2434  Reopen(&options);
2435  MinLevelHelper(this, options);
2436 
2437  // do not compress L0 and L1
2438  for (int i = 0; i < 2; i++) {
2439  options.compression_per_level[i] = kNoCompression;
2440  }
2441  for (int i = 2; i < options.num_levels; i++) {
2442  options.compression_per_level[i] = type;
2443  }
2444  DestroyAndReopen(&options);
2445  MinLevelHelper(this, options);
2446 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
RepeatedWritesToSameKey   
)

Definition at line 2448 of file db_test.cc.

References ASSERT_LE, rocksdb::Options::env, env_, RandomString(), value, and rocksdb::Options::write_buffer_size.

2448  {
2449  do {
2450  Options options = CurrentOptions();
2451  options.env = env_;
2452  options.write_buffer_size = 100000; // Small write buffer
2453  Reopen(&options);
2454 
2455  // We must have at most one file per level except for level-0,
2456  // which may have up to kL0_StopWritesTrigger files.
2457  const int kMaxFiles = dbfull()->NumberLevels() +
2458  dbfull()->Level0StopWriteTrigger();
2459 
2460  Random rnd(301);
2461  std::string value = RandomString(&rnd, 2 * options.write_buffer_size);
2462  for (int i = 0; i < 5 * kMaxFiles; i++) {
2463  Put("key", value);
2464  ASSERT_LE(TotalTableFiles(), kMaxFiles);
2465  }
2466  } while (ChangeCompactOptions());
2467 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
InPlaceUpdate   
)

Definition at line 2469 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Options::create_if_missing, rocksdb::Options::env, env_, rocksdb::Options::inplace_update_support, rocksdb::Iterator::key(), kTypeValue, rocksdb::Iterator::Next(), rocksdb::Status::ok(), ParseInternalKey(), rocksdb::Iterator::SeekToFirst(), rocksdb::ParsedInternalKey::sequence, rocksdb::Iterator::status(), rocksdb::Iterator::Valid(), value, and rocksdb::Options::write_buffer_size.

2469  {
2470  do {
2471  Options options = CurrentOptions();
2472  options.create_if_missing = true;
2473  options.inplace_update_support = true;
2474  options.env = env_;
2475  options.write_buffer_size = 100000;
2476 
2477  // Update key with values of smaller size
2478  Reopen(&options);
2479  int numValues = 10;
2480  for (int i = numValues; i > 0; i--) {
2481  std::string value = DummyString(i, 'a');
2482  ASSERT_OK(Put("key", value));
2483  ASSERT_EQ(value, Get("key"));
2484  }
2485 
2486  int count = 0;
2487  Iterator* iter = dbfull()->TEST_NewInternalIterator();
2488  iter->SeekToFirst();
2489  ASSERT_EQ(iter->status().ok(), true);
2490  while (iter->Valid()) {
2491  ParsedInternalKey ikey(Slice(), 0, kTypeValue);
2492  ikey.sequence = -1;
2493  ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
2494  count++;
2495  // All updates with the same sequence number.
2496  ASSERT_EQ(ikey.sequence, (unsigned)1);
2497  iter->Next();
2498  }
2499  // Only 1 instance for that key.
2500  ASSERT_EQ(count, 1);
2501  delete iter;
2502 
2503  // Update key with values of larger size
2504  DestroyAndReopen(&options);
2505  numValues = 10;
2506  for (int i = 0; i < numValues; i++) {
2507  std::string value = DummyString(i, 'a');
2508  ASSERT_OK(Put("key", value));
2509  ASSERT_EQ(value, Get("key"));
2510  }
2511 
2512  count = 0;
2513  iter = dbfull()->TEST_NewInternalIterator();
2514  iter->SeekToFirst();
2515  ASSERT_EQ(iter->status().ok(), true);
2516  int seq = numValues;
2517  while (iter->Valid()) {
2518  ParsedInternalKey ikey(Slice(), 0, kTypeValue);
2519  ikey.sequence = -1;
2520  ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
2521  count++;
2522  // No inplace updates. All updates are puts with new seq number
2523  ASSERT_EQ(ikey.sequence, (unsigned)seq--);
2524  iter->Next();
2525  }
2526  // All 10 updates exist in the internal iterator
2527  ASSERT_EQ(count, numValues);
2528  delete iter;
2529 
2530 
2531  } while (ChangeCompactOptions());
2532 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
CompactionFilter   
)

Definition at line 2631 of file db_test.cc.

References ASSERT_EQ, ASSERT_NE, ASSERT_OK, cfilter_count, rocksdb::Options::compaction_filter_factory, rocksdb::Options::create_if_missing, db_, rocksdb::Iterator::key(), kTypeValue, rocksdb::Options::max_mem_compaction_level, rocksdb::DBImpl::NewIterator(), rocksdb::Iterator::Next(), rocksdb::Options::num_levels, ParseInternalKey(), rocksdb::Iterator::SeekToFirst(), rocksdb::ParsedInternalKey::sequence, rocksdb::Iterator::status(), rocksdb::Iterator::Valid(), and value.

2631  {
2632  Options options = CurrentOptions();
2633  options.num_levels = 3;
2634  options.max_mem_compaction_level = 0;
2635  options.compaction_filter_factory = std::make_shared<KeepFilterFactory>();
2636  Reopen(&options);
2637 
2638  // Write 100K keys, these are written to a few files in L0.
2639  const std::string value(10, 'x');
2640  for (int i = 0; i < 100000; i++) {
2641  char key[100];
2642  snprintf(key, sizeof(key), "B%010d", i);
2643  Put(key, value);
2644  }
2645  dbfull()->TEST_FlushMemTable();
2646 
2647  // Push all files to the highest level L2. Verify that
2648  // the compaction is each level invokes the filter for
2649  // all the keys in that level.
2650  cfilter_count = 0;
2651  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
2652  ASSERT_EQ(cfilter_count, 100000);
2653  cfilter_count = 0;
2654  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
2655  ASSERT_EQ(cfilter_count, 100000);
2656 
2657  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
2658  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
2659  ASSERT_NE(NumTableFilesAtLevel(2), 0);
2660  cfilter_count = 0;
2661 
2662  // All the files are in the lowest level.
2663  // Verify that all but the 100001st record
2664  // has sequence number zero. The 100001st record
2665  // is at the tip of this snapshot and cannot
2666  // be zeroed out.
2667  // TODO: figure out sequence number squash too
2668  int count = 0;
2669  int total = 0;
2670  Iterator* iter = dbfull()->TEST_NewInternalIterator();
2671  iter->SeekToFirst();
2672  ASSERT_OK(iter->status());
2673  while (iter->Valid()) {
2674  ParsedInternalKey ikey(Slice(), 0, kTypeValue);
2675  ikey.sequence = -1;
2676  ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
2677  total++;
2678  if (ikey.sequence != 0) {
2679  count++;
2680  }
2681  iter->Next();
2682  }
2683  ASSERT_EQ(total, 100000);
2684  ASSERT_EQ(count, 1);
2685  delete iter;
2686 
2687  // overwrite all the 100K keys once again.
2688  for (int i = 0; i < 100000; i++) {
2689  char key[100];
2690  snprintf(key, sizeof(key), "B%010d", i);
2691  Put(key, value);
2692  }
2693  dbfull()->TEST_FlushMemTable();
2694 
2695  // push all files to the highest level L2. This
2696  // means that all keys should pass at least once
2697  // via the compaction filter
2698  cfilter_count = 0;
2699  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
2700  ASSERT_EQ(cfilter_count, 100000);
2701  cfilter_count = 0;
2702  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
2703  ASSERT_EQ(cfilter_count, 100000);
2704  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
2705  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
2706  ASSERT_NE(NumTableFilesAtLevel(2), 0);
2707 
2708  // create a new database with the compaction
2709  // filter in such a way that it deletes all keys
2710  options.compaction_filter_factory = std::make_shared<DeleteFilterFactory>();
2711  options.create_if_missing = true;
2712  DestroyAndReopen(&options);
2713 
2714  // write all the keys once again.
2715  for (int i = 0; i < 100000; i++) {
2716  char key[100];
2717  snprintf(key, sizeof(key), "B%010d", i);
2718  Put(key, value);
2719  }
2720  dbfull()->TEST_FlushMemTable();
2721  ASSERT_NE(NumTableFilesAtLevel(0), 0);
2722  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
2723  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
2724 
2725  // Push all files to the highest level L2. This
2726  // triggers the compaction filter to delete all keys,
2727  // verify that at the end of the compaction process,
2728  // nothing is left.
2729  cfilter_count = 0;
2730  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
2731  ASSERT_EQ(cfilter_count, 100000);
2732  cfilter_count = 0;
2733  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
2734  ASSERT_EQ(cfilter_count, 100000);
2735  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
2736  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
2737 
2738  // Scan the entire database to ensure that nothing is left
2739  iter = db_->NewIterator(ReadOptions());
2740  iter->SeekToFirst();
2741  count = 0;
2742  while (iter->Valid()) {
2743  count++;
2744  iter->Next();
2745  }
2746  ASSERT_EQ(count, 0);
2747  delete iter;
2748 
2749  // The sequence number of the remaining record
2750  // is not zeroed out even though it is at the
2751  // level Lmax because this record is at the tip
2752  // TODO: remove the following or design a different
2753  // test
2754  count = 0;
2755  iter = dbfull()->TEST_NewInternalIterator();
2756  iter->SeekToFirst();
2757  ASSERT_OK(iter->status());
2758  while (iter->Valid()) {
2759  ParsedInternalKey ikey(Slice(), 0, kTypeValue);
2760  ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
2761  ASSERT_NE(ikey.sequence, (unsigned)0);
2762  count++;
2763  iter->Next();
2764  }
2765  ASSERT_EQ(count, 0);
2766  delete iter;
2767 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
CompactionFilterWithValueChange   
)

Definition at line 2769 of file db_test.cc.

References ASSERT_EQ, rocksdb::Options::compaction_filter_factory, rocksdb::Options::max_mem_compaction_level, NEW_VALUE, rocksdb::Options::num_levels, and value.

2769  {
2770  do {
2771  Options options = CurrentOptions();
2772  options.num_levels = 3;
2773  options.max_mem_compaction_level = 0;
2774  options.compaction_filter_factory =
2775  std::make_shared<ChangeFilterFactory>(100);
2776  Reopen(&options);
2777 
2778  // Write 100K+1 keys, these are written to a few files
2779  // in L0. We do this so that the current snapshot points
2780  // to the 100001 key. The compaction filter is not invoked
2781  // on keys that are visible via a snapshot because we
2782  // anyways cannot delete it.
2783  const std::string value(10, 'x');
2784  for (int i = 0; i < 100001; i++) {
2785  char key[100];
2786  snprintf(key, sizeof(key), "B%010d", i);
2787  Put(key, value);
2788  }
2789 
2790  // push all files to lower levels
2791  dbfull()->TEST_FlushMemTable();
2792  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
2793  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
2794 
2795  // re-write all data again
2796  for (int i = 0; i < 100001; i++) {
2797  char key[100];
2798  snprintf(key, sizeof(key), "B%010d", i);
2799  Put(key, value);
2800  }
2801 
2802  // push all files to lower levels. This should
2803  // invoke the compaction filter for all 100000 keys.
2804  dbfull()->TEST_FlushMemTable();
2805  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
2806  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
2807 
2808  // verify that all keys now have the new value that
2809  // was set by the compaction process.
2810  for (int i = 0; i < 100000; i++) {
2811  char key[100];
2812  snprintf(key, sizeof(key), "B%010d", i);
2813  std::string newvalue = Get(key);
2814  ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
2815  }
2816  } while (ChangeCompactOptions());
2817 }
rocksdb::TEST ( DBTest  ,
SparseMerge   
)

Definition at line 2819 of file db_test.cc.

References ASSERT_LE, rocksdb::Options::compression, kNoCompression, and value.

2819  {
2820  do {
2821  Options options = CurrentOptions();
2822  options.compression = kNoCompression;
2823  Reopen(&options);
2824 
2825  FillLevels("A", "Z");
2826 
2827  // Suppose there is:
2828  // small amount of data with prefix A
2829  // large amount of data with prefix B
2830  // small amount of data with prefix C
2831  // and that recent updates have made small changes to all three prefixes.
2832  // Check that we do not do a compaction that merges all of B in one shot.
2833  const std::string value(1000, 'x');
2834  Put("A", "va");
2835  // Write approximately 100MB of "B" values
2836  for (int i = 0; i < 100000; i++) {
2837  char key[100];
2838  snprintf(key, sizeof(key), "B%010d", i);
2839  Put(key, value);
2840  }
2841  Put("C", "vc");
2842  dbfull()->TEST_FlushMemTable();
2843  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
2844 
2845  // Make sparse update
2846  Put("A", "va2");
2847  Put("B100", "bvalue2");
2848  Put("C", "vc2");
2849  dbfull()->TEST_FlushMemTable();
2850 
2851  // Compactions should not cause us to create a situation where
2852  // a file overlaps too much data at the next level.
2853  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
2854  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
2855  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
2856  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
2857  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
2858  } while (ChangeCompactOptions());
2859 }
rocksdb::TEST ( DBTest  ,
ApproximateSizes   
)

Definition at line 2872 of file db_test.cc.

References ASSERT_EQ, ASSERT_GT, ASSERT_OK, ASSERT_TRUE, Between(), rocksdb::Options::compression, kNoCompression, RandomString(), ripple::run(), and rocksdb::Options::write_buffer_size.

2872  {
2873  do {
2874  Options options = CurrentOptions();
2875  options.write_buffer_size = 100000000; // Large write buffer
2876  options.compression = kNoCompression;
2877  DestroyAndReopen();
2878 
2879  ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));
2880  Reopen(&options);
2881  ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));
2882 
2883  // Write 8MB (80 values, each 100K)
2884  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
2885  const int N = 80;
2886  static const int S1 = 100000;
2887  static const int S2 = 105000; // Allow some expansion from metadata
2888  Random rnd(301);
2889  for (int i = 0; i < N; i++) {
2890  ASSERT_OK(Put(Key(i), RandomString(&rnd, S1)));
2891  }
2892 
2893  // 0 because GetApproximateSizes() does not account for memtable space
2894  ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));
2895 
2896  // Check sizes across recovery by reopening a few times
2897  for (int run = 0; run < 3; run++) {
2898  Reopen(&options);
2899 
2900  for (int compact_start = 0; compact_start < N; compact_start += 10) {
2901  for (int i = 0; i < N; i += 10) {
2902  ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
2903  ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1)));
2904  ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
2905  }
2906  ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
2907  ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));
2908 
2909  std::string cstart_str = Key(compact_start);
2910  std::string cend_str = Key(compact_start + 9);
2911  Slice cstart = cstart_str;
2912  Slice cend = cend_str;
2913  dbfull()->TEST_CompactRange(0, &cstart, &cend);
2914  }
2915 
2916  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
2917  ASSERT_GT(NumTableFilesAtLevel(1), 0);
2918  }
2919  } while (ChangeOptions(kSkipUniversalCompaction));
2920 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
ApproximateSizes_MixOfSmallAndLarge   
)

Definition at line 2922 of file db_test.cc.

References ASSERT_OK, ASSERT_TRUE, Between(), rocksdb::Options::compression, kNoCompression, RandomString(), and ripple::run().

2922  {
2923  do {
2924  Options options = CurrentOptions();
2925  options.compression = kNoCompression;
2926  Reopen();
2927 
2928  Random rnd(301);
2929  std::string big1 = RandomString(&rnd, 100000);
2930  ASSERT_OK(Put(Key(0), RandomString(&rnd, 10000)));
2931  ASSERT_OK(Put(Key(1), RandomString(&rnd, 10000)));
2932  ASSERT_OK(Put(Key(2), big1));
2933  ASSERT_OK(Put(Key(3), RandomString(&rnd, 10000)));
2934  ASSERT_OK(Put(Key(4), big1));
2935  ASSERT_OK(Put(Key(5), RandomString(&rnd, 10000)));
2936  ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000)));
2937  ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000)));
2938 
2939  // Check sizes across recovery by reopening a few times
2940  for (int run = 0; run < 3; run++) {
2941  Reopen(&options);
2942 
2943  ASSERT_TRUE(Between(Size("", Key(0)), 0, 0));
2944  ASSERT_TRUE(Between(Size("", Key(1)), 10000, 11000));
2945  ASSERT_TRUE(Between(Size("", Key(2)), 20000, 21000));
2946  ASSERT_TRUE(Between(Size("", Key(3)), 120000, 121000));
2947  ASSERT_TRUE(Between(Size("", Key(4)), 130000, 131000));
2948  ASSERT_TRUE(Between(Size("", Key(5)), 230000, 231000));
2949  ASSERT_TRUE(Between(Size("", Key(6)), 240000, 241000));
2950  ASSERT_TRUE(Between(Size("", Key(7)), 540000, 541000));
2951  ASSERT_TRUE(Between(Size("", Key(8)), 550000, 560000));
2952 
2953  ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));
2954 
2955  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
2956  }
2957  } while (ChangeOptions());
2958 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
IteratorPinsRef   
)

Definition at line 2960 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, db_, rocksdb::Iterator::key(), rocksdb::DBImpl::NewIterator(), rocksdb::Iterator::Next(), rocksdb::Iterator::SeekToFirst(), rocksdb::Slice::ToString(), rocksdb::Iterator::Valid(), and rocksdb::Iterator::value().

2960  {
2961  do {
2962  Put("foo", "hello");
2963 
2964  // Get iterator that will yield the current contents of the DB.
2965  Iterator* iter = db_->NewIterator(ReadOptions());
2966 
2967  // Write to force compactions
2968  Put("foo", "newvalue1");
2969  for (int i = 0; i < 100; i++) {
2970  ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
2971  }
2972  Put("foo", "newvalue2");
2973 
2974  iter->SeekToFirst();
2975  ASSERT_TRUE(iter->Valid());
2976  ASSERT_EQ("foo", iter->key().ToString());
2977  ASSERT_EQ("hello", iter->value().ToString());
2978  iter->Next();
2979  ASSERT_TRUE(!iter->Valid());
2980  delete iter;
2981  } while (ChangeCompactOptions());
2982 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
Snapshot   
)

Definition at line 2984 of file db_test.cc.

References ASSERT_EQ, db_, rocksdb::DBImpl::GetSnapshot(), and rocksdb::DBImpl::ReleaseSnapshot().

2984  {
2985  do {
2986  Put("foo", "v1");
2987  const Snapshot* s1 = db_->GetSnapshot();
2988  Put("foo", "v2");
2989  const Snapshot* s2 = db_->GetSnapshot();
2990  Put("foo", "v3");
2991  const Snapshot* s3 = db_->GetSnapshot();
2992 
2993  Put("foo", "v4");
2994  ASSERT_EQ("v1", Get("foo", s1));
2995  ASSERT_EQ("v2", Get("foo", s2));
2996  ASSERT_EQ("v3", Get("foo", s3));
2997  ASSERT_EQ("v4", Get("foo"));
2998 
2999  db_->ReleaseSnapshot(s3);
3000  ASSERT_EQ("v1", Get("foo", s1));
3001  ASSERT_EQ("v2", Get("foo", s2));
3002  ASSERT_EQ("v4", Get("foo"));
3003 
3004  db_->ReleaseSnapshot(s1);
3005  ASSERT_EQ("v2", Get("foo", s2));
3006  ASSERT_EQ("v4", Get("foo"));
3007 
3008  db_->ReleaseSnapshot(s2);
3009  ASSERT_EQ("v4", Get("foo"));
3010  } while (ChangeOptions());
3011 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
HiddenValuesAreRemoved   
)

Definition at line 3013 of file db_test.cc.

References ASSERT_EQ, ASSERT_GE, ASSERT_GT, ASSERT_OK, ASSERT_TRUE, Between(), db_, rocksdb::DBImpl::GetSnapshot(), RandomString(), and rocksdb::DBImpl::ReleaseSnapshot().

3013  {
3014  do {
3015  Random rnd(301);
3016  FillLevels("a", "z");
3017 
3018  std::string big = RandomString(&rnd, 50000);
3019  Put("foo", big);
3020  Put("pastfoo", "v");
3021  const Snapshot* snapshot = db_->GetSnapshot();
3022  Put("foo", "tiny");
3023  Put("pastfoo2", "v2"); // Advance sequence number one more
3024 
3025  ASSERT_OK(dbfull()->TEST_FlushMemTable());
3026  ASSERT_GT(NumTableFilesAtLevel(0), 0);
3027 
3028  ASSERT_EQ(big, Get("foo", snapshot));
3029  ASSERT_TRUE(Between(Size("", "pastfoo"), 50000, 60000));
3030  db_->ReleaseSnapshot(snapshot);
3031  ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
3032  Slice x("x");
3033  dbfull()->TEST_CompactRange(0, nullptr, &x);
3034  ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
3035  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
3036  ASSERT_GE(NumTableFilesAtLevel(1), 1);
3037  dbfull()->TEST_CompactRange(1, nullptr, &x);
3038  ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
3039 
3040  ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));
3041  } while (ChangeOptions(kSkipUniversalCompaction));
3042 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
CompactBetweenSnapshots   
)

Definition at line 3044 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, db_, rocksdb::DBImpl::GetSnapshot(), and rocksdb::DBImpl::ReleaseSnapshot().

3044  {
3045  do {
3046  Random rnd(301);
3047  FillLevels("a", "z");
3048 
3049  Put("foo", "first");
3050  const Snapshot* snapshot1 = db_->GetSnapshot();
3051  Put("foo", "second");
3052  Put("foo", "third");
3053  Put("foo", "fourth");
3054  const Snapshot* snapshot2 = db_->GetSnapshot();
3055  Put("foo", "fifth");
3056  Put("foo", "sixth");
3057 
3058  // All entries (including duplicates) exist
3059  // before any compaction is triggered.
3060  ASSERT_OK(dbfull()->TEST_FlushMemTable());
3061  ASSERT_EQ("sixth", Get("foo"));
3062  ASSERT_EQ("fourth", Get("foo", snapshot2));
3063  ASSERT_EQ("first", Get("foo", snapshot1));
3064  ASSERT_EQ(AllEntriesFor("foo"),
3065  "[ sixth, fifth, fourth, third, second, first ]");
3066 
3067  // After a compaction, "second", "third" and "fifth" should
3068  // be removed
3069  FillLevels("a", "z");
3070  dbfull()->CompactRange(nullptr, nullptr);
3071  ASSERT_EQ("sixth", Get("foo"));
3072  ASSERT_EQ("fourth", Get("foo", snapshot2));
3073  ASSERT_EQ("first", Get("foo", snapshot1));
3074  ASSERT_EQ(AllEntriesFor("foo"), "[ sixth, fourth, first ]");
3075 
3076  // after we release the snapshot1, only two values left
3077  db_->ReleaseSnapshot(snapshot1);
3078  FillLevels("a", "z");
3079  dbfull()->CompactRange(nullptr, nullptr);
3080 
3081  // We have only one valid snapshot snapshot2. Since snapshot1 is
3082  // not valid anymore, "first" should be removed by a compaction.
3083  ASSERT_EQ("sixth", Get("foo"));
3084  ASSERT_EQ("fourth", Get("foo", snapshot2));
3085  ASSERT_EQ(AllEntriesFor("foo"), "[ sixth, fourth ]");
3086 
3087  // after we release the snapshot2, only one value should be left
3088  db_->ReleaseSnapshot(snapshot2);
3089  FillLevels("a", "z");
3090  dbfull()->CompactRange(nullptr, nullptr);
3091  ASSERT_EQ("sixth", Get("foo"));
3092  ASSERT_EQ(AllEntriesFor("foo"), "[ sixth ]");
3093 
3094  } while (ChangeOptions());
3095 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
DeletionMarkers1   
)

Definition at line 3097 of file db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

3097  {
3098  Put("foo", "v1");
3099  ASSERT_OK(dbfull()->TEST_FlushMemTable());
3100  const int last = dbfull()->MaxMemCompactionLevel();
3101  ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
3102 
3103  // Place a table at level last-1 to prevent merging with preceding mutation
3104  Put("a", "begin");
3105  Put("z", "end");
3106  dbfull()->TEST_FlushMemTable();
3107  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
3108  ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
3109 
3110  Delete("foo");
3111  Put("foo", "v2");
3112  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
3113  ASSERT_OK(dbfull()->TEST_FlushMemTable()); // Moves to level last-2
3114  if (CurrentOptions().purge_redundant_kvs_while_flush) {
3115  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
3116  } else {
3117  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
3118  }
3119  Slice z("z");
3120  dbfull()->TEST_CompactRange(last-2, nullptr, &z);
3121  // DEL eliminated, but v1 remains because we aren't compacting that level
3122  // (DEL can be eliminated because v2 hides v1).
3123  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
3124  dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
3125  // Merging last-1 w/ last, so we are the base level for "foo", so
3126  // DEL is removed. (as is v1).
3127  ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
3128 }
rocksdb::TEST ( DBTest  ,
DeletionMarkers2   
)

Definition at line 3130 of file db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

3130  {
3131  Put("foo", "v1");
3132  ASSERT_OK(dbfull()->TEST_FlushMemTable());
3133  const int last = dbfull()->MaxMemCompactionLevel();
3134  ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
3135 
3136  // Place a table at level last-1 to prevent merging with preceding mutation
3137  Put("a", "begin");
3138  Put("z", "end");
3139  dbfull()->TEST_FlushMemTable();
3140  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
3141  ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
3142 
3143  Delete("foo");
3144  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
3145  ASSERT_OK(dbfull()->TEST_FlushMemTable()); // Moves to level last-2
3146  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
3147  dbfull()->TEST_CompactRange(last-2, nullptr, nullptr);
3148  // DEL kept: "last" file overlaps
3149  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
3150  dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
3151  // Merging last-1 w/ last, so we are the base level for "foo", so
3152  // DEL is removed. (as is v1).
3153  ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
3154 }
rocksdb::TEST ( DBTest  ,
OverlapInLevel0   
)

Definition at line 3156 of file db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

3156  {
3157  do {
3158  int tmp = dbfull()->MaxMemCompactionLevel();
3159  ASSERT_EQ(tmp, 2) << "Fix test to match config";
3160 
3161  // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
3162  ASSERT_OK(Put("100", "v100"));
3163  ASSERT_OK(Put("999", "v999"));
3164  dbfull()->TEST_FlushMemTable();
3165  ASSERT_OK(Delete("100"));
3166  ASSERT_OK(Delete("999"));
3167  dbfull()->TEST_FlushMemTable();
3168  ASSERT_EQ("0,1,1", FilesPerLevel());
3169 
3170  // Make files spanning the following ranges in level-0:
3171  // files[0] 200 .. 900
3172  // files[1] 300 .. 500
3173  // Note that files are sorted by smallest key.
3174  ASSERT_OK(Put("300", "v300"));
3175  ASSERT_OK(Put("500", "v500"));
3176  dbfull()->TEST_FlushMemTable();
3177  ASSERT_OK(Put("200", "v200"));
3178  ASSERT_OK(Put("600", "v600"));
3179  ASSERT_OK(Put("900", "v900"));
3180  dbfull()->TEST_FlushMemTable();
3181  ASSERT_EQ("2,1,1", FilesPerLevel());
3182 
3183  // Compact away the placeholder files we created initially
3184  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
3185  dbfull()->TEST_CompactRange(2, nullptr, nullptr);
3186  ASSERT_EQ("2", FilesPerLevel());
3187 
3188  // Do a memtable compaction. Before bug-fix, the compaction would
3189  // not detect the overlap with level-0 files and would incorrectly place
3190  // the deletion in a deeper level.
3191  ASSERT_OK(Delete("600"));
3192  dbfull()->TEST_FlushMemTable();
3193  ASSERT_EQ("3", FilesPerLevel());
3194  ASSERT_EQ("NOT_FOUND", Get("600"));
3195  } while (ChangeOptions(kSkipUniversalCompaction));
3196 }
rocksdb::TEST ( DBTest  ,
L0_CompactionBug_Issue44_a   
)

Definition at line 3198 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, env_, and rocksdb::Env::SleepForMicroseconds().

3198  {
3199  do {
3200  Reopen();
3201  ASSERT_OK(Put("b", "v"));
3202  Reopen();
3203  ASSERT_OK(Delete("b"));
3204  ASSERT_OK(Delete("a"));
3205  Reopen();
3206  ASSERT_OK(Delete("a"));
3207  Reopen();
3208  ASSERT_OK(Put("a", "v"));
3209  Reopen();
3210  Reopen();
3211  ASSERT_EQ("(a->v)", Contents());
3212  env_->SleepForMicroseconds(1000000); // Wait for compaction to finish
3213  ASSERT_EQ("(a->v)", Contents());
3214  } while (ChangeCompactOptions());
3215 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
L0_CompactionBug_Issue44_b   
)

Definition at line 3217 of file db_test.cc.

References ASSERT_EQ, env_, and rocksdb::Env::SleepForMicroseconds().

3217  {
 // Regression test (part b) for the same issue-44 compaction bug: a longer
 // mix of empty-key puts, deletes, and reopens. The expected contents are
 // the empty key mapping to the empty value plus (c->cv); the two sleeps let
 // background compactions run mid-sequence and at the end.
3218  do {
3219  Reopen();
3220  Put("","");
3221  Reopen();
3222  Delete("e");
3223  Put("","");
3224  Reopen();
3225  Put("c", "cv");
3226  Reopen();
3227  Put("","");
3228  Reopen();
3229  Put("","");
3230  env_->SleepForMicroseconds(1000000); // Wait for compaction to finish
3231  Reopen();
3232  Put("d","dv");
3233  Reopen();
3234  Put("","");
3235  Reopen();
3236  Delete("d");
3237  Delete("b");
3238  Reopen();
3239  ASSERT_EQ("(->)(c->cv)", Contents());
3240  env_->SleepForMicroseconds(1000000); // Wait for compaction to finish
 // Same contents expected after compaction as before it.
3241  ASSERT_EQ("(->)(c->cv)", Contents());
3242  } while (ChangeCompactOptions());
3243 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
ComparatorCheck   
)

Definition at line 3245 of file db_test.cc.

References ASSERT_TRUE, BytewiseComparator(), rocksdb::Options::comparator, rocksdb::Comparator::Compare(), rocksdb::Comparator::FindShortestSeparator(), rocksdb::Comparator::FindShortSuccessor(), rocksdb::Status::ok(), and rocksdb::Status::ToString().

3245  {
 // Verifies that reopening an existing DB with a comparator whose Name()
 // differs from the one the DB was created with is rejected: TryReopen must
 // fail and the error text must mention "comparator".
3246  class NewComparator : public Comparator {
3247  public:
3248  virtual const char* Name() const { return "rocksdb.NewComparator"; }
3249  virtual int Compare(const Slice& a, const Slice& b) const {
3250  return BytewiseComparator()->Compare(a, b);
3251  }
 // NOTE(review): the doc generator elided source lines 3253 and 3256 here,
 // so the bodies of the two methods below are not visible in this listing —
 // presumably they forward to BytewiseComparator(); confirm in db_test.cc.
3252  virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
3254  }
3255  virtual void FindShortSuccessor(std::string* key) const {
3257  }
3258  };
3259  Options new_options;
3260  NewComparator cmp;
3261  do {
3262  new_options = CurrentOptions();
3263  new_options.comparator = &cmp;
3264  Status s = TryReopen(&new_options);
3265  ASSERT_TRUE(!s.ok());
3266  ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
3267  << s.ToString();
3268  } while (ChangeCompactOptions(&new_options));
3269 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
CustomComparator   
)

Definition at line 3271 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, rocksdb::Options::comparator, rocksdb::Options::create_if_missing, EscapeString(), rocksdb::Options::filter_policy, ripple::run(), rocksdb::Slice::size(), rocksdb::Slice::ToString(), and rocksdb::Options::write_buffer_size.

3271  {
 // Exercises a custom comparator that orders keys by the integer parsed from
 // a "[<number>]" string (sscanf "%i", so decimal and hex spellings of the
 // same value — e.g. "[10]" and "[0xa]" — compare equal and alias one entry).
3272  class NumberComparator : public Comparator {
3273  public:
3274  virtual const char* Name() const { return "test.NumberComparator"; }
3275  virtual int Compare(const Slice& a, const Slice& b) const {
3276  return ToNumber(a) - ToNumber(b);
3277  }
3278  virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
3279  ToNumber(*s); // Check format
3280  ToNumber(l); // Check format
3281  }
3282  virtual void FindShortSuccessor(std::string* key) const {
3283  ToNumber(*key); // Check format
3284  }
3285  private:
 // Parses "[<number>]" and returns the number; asserts on any malformed key
 // so format violations surface during FindShortestSeparator/Compare calls.
3286  static int ToNumber(const Slice& x) {
3287  // Check that there are no extra characters.
3288  ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']')
3289  << EscapeString(x);
3290  int val;
3291  char ignored;
3292  ASSERT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
3293  << EscapeString(x);
3294  return val;
3295  }
3296  };
3297  Options new_options;
3298  NumberComparator cmp;
3299  do {
3300  new_options = CurrentOptions();
3301  new_options.create_if_missing = true;
3302  new_options.comparator = &cmp;
3303  new_options.filter_policy = nullptr; // Cannot use bloom filters
3304  new_options.write_buffer_size = 1000; // Compact more often
3305  DestroyAndReopen(&new_options);
3306  ASSERT_OK(Put("[10]", "ten"));
3307  ASSERT_OK(Put("[0x14]", "twenty"));
 // Two passes: lookups are checked both before and after a manual compaction.
3308  for (int i = 0; i < 2; i++) {
3309  ASSERT_EQ("ten", Get("[10]"));
3310  ASSERT_EQ("ten", Get("[0xa]"));
3311  ASSERT_EQ("twenty", Get("[20]"));
3312  ASSERT_EQ("twenty", Get("[0x14]"));
3313  ASSERT_EQ("NOT_FOUND", Get("[15]"));
3314  ASSERT_EQ("NOT_FOUND", Get("[0xf]"));
3315  Compact("[0]", "[9999]");
3316  }
3317 
 // Bulk-write 1000 keys twice with a tiny write buffer so flushes and
 // compactions run the comparator's separator/successor paths heavily.
3318  for (int run = 0; run < 2; run++) {
3319  for (int i = 0; i < 1000; i++) {
3320  char buf[100];
3321  snprintf(buf, sizeof(buf), "[%d]", i*10);
3322  ASSERT_OK(Put(buf, buf));
3323  }
3324  Compact("[0]", "[1000000]");
3325  }
3326  } while (ChangeCompactOptions(&new_options));
3327 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
ManualCompaction   
)

Definition at line 3329 of file db_test.cc.

References ASSERT_EQ, rocksdb::DBImpl::CompactRange(), and db_.

3329  {
 // Checks manual compaction range handling: ranges entirely before or after
 // the existing files must be no-ops, overlapping ranges compact only the
 // affected files, and a full-range CompactRange pushes everything to the
 // bottom level. FilesPerLevel() strings encode file counts per level.
3330  ASSERT_EQ(dbfull()->MaxMemCompactionLevel(), 2)
3331  << "Need to update this test to match kMaxMemCompactLevel";
3332 
3333  MakeTables(3, "p", "q");
3334  ASSERT_EQ("1,1,1", FilesPerLevel());
3335 
3336  // Compaction range falls before files
3337  Compact("", "c");
3338  ASSERT_EQ("1,1,1", FilesPerLevel());
3339 
3340  // Compaction range falls after files
3341  Compact("r", "z");
3342  ASSERT_EQ("1,1,1", FilesPerLevel());
3343 
3344  // Compaction range overlaps files
3345  Compact("p1", "p9");
3346  ASSERT_EQ("0,0,1", FilesPerLevel());
3347 
3348  // Populate a different range
3349  MakeTables(3, "c", "e");
3350  ASSERT_EQ("1,1,2", FilesPerLevel());
3351 
3352  // Compact just the new range
3353  Compact("b", "f");
3354  ASSERT_EQ("0,0,2", FilesPerLevel());
3355 
3356  // Compact all
3357  MakeTables(1, "a", "z");
3358  ASSERT_EQ("0,1,2", FilesPerLevel());
 // nullptr begin/end means "entire key space".
3359  db_->CompactRange(nullptr, nullptr);
3360  ASSERT_EQ("0,0,1", FilesPerLevel());
3361 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
DBOpen_Options   
)

Definition at line 3363 of file db_test.cc.

References ASSERT_OK, ASSERT_TRUE, rocksdb::Options::create_if_missing, db, dbname, DestroyDB(), rocksdb::Options::error_if_exists, rocksdb::DB::Open(), rocksdb::test::TmpDir(), and rocksdb::Status::ToString().

3363  {
 // Verifies the four combinations of create_if_missing / error_if_exists
 // against a DB path that starts out absent: open of a missing DB fails
 // unless create_if_missing, and open of an existing DB fails only when
 // error_if_exists is set.
3364  std::string dbname = test::TmpDir() + "/db_options_test";
3365  ASSERT_OK(DestroyDB(dbname, Options()));
3366 
3367  // Does not exist, and create_if_missing == false: error
3368  DB* db = nullptr;
3369  Options opts;
3370  opts.create_if_missing = false;
3371  Status s = DB::Open(opts, dbname, &db);
3372  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
3373  ASSERT_TRUE(db == nullptr);
3374 
3375  // Does not exist, and create_if_missing == true: OK
3376  opts.create_if_missing = true;
3377  s = DB::Open(opts, dbname, &db);
3378  ASSERT_OK(s);
3379  ASSERT_TRUE(db != nullptr);
3380 
3381  delete db;
3382  db = nullptr;
3383 
3384  // Does exist, and error_if_exists == true: error
3385  opts.create_if_missing = false;
3386  opts.error_if_exists = true;
3387  s = DB::Open(opts, dbname, &db);
3388  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
3389  ASSERT_TRUE(db == nullptr);
3390 
3391  // Does exist, and error_if_exists == false: OK
3392  opts.create_if_missing = true;
3393  opts.error_if_exists = false;
3394  s = DB::Open(opts, dbname, &db);
3395  ASSERT_OK(s);
3396  ASSERT_TRUE(db != nullptr);
3397 
3398  delete db;
3399  db = nullptr;
3400 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
DBOpen_Change_NumLevels   
)

Definition at line 3402 of file db_test.cc.

References ASSERT_OK, ASSERT_TRUE, rocksdb::DB::CompactRange(), rocksdb::Options::create_if_missing, db, dbname, DestroyDB(), rocksdb::Options::num_levels, rocksdb::DB::Open(), rocksdb::DB::Put(), rocksdb::test::TmpDir(), and rocksdb::Status::ToString().

3402  {
 // Creates a DB, compacts some data (placing files in deeper levels), then
 // reopens it with num_levels reduced to 2; the open must fail with a
 // Corruption status because existing files live in levels that no longer
 // exist.
3403  std::string dbname = test::TmpDir() + "/db_change_num_levels";
3404  ASSERT_OK(DestroyDB(dbname, Options()));
3405  Options opts;
3406  Status s;
3407  DB* db = nullptr;
3408  opts.create_if_missing = true;
3409  s = DB::Open(opts, dbname, &db);
3410  ASSERT_OK(s);
3411  ASSERT_TRUE(db != nullptr);
3412  db->Put(WriteOptions(), "a", "123");
3413  db->Put(WriteOptions(), "b", "234");
3414  db->CompactRange(nullptr, nullptr);
3415  delete db;
3416  db = nullptr;
3417 
3418  opts.create_if_missing = false;
3419  opts.num_levels = 2;
3420  s = DB::Open(opts, dbname, &db);
3421  ASSERT_TRUE(strstr(s.ToString().c_str(), "Corruption") != nullptr);
3422  ASSERT_TRUE(db == nullptr);
3423 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
DestroyDBMetaDatabase   
)

Definition at line 3425 of file db_test.cc.

References ASSERT_OK, ASSERT_TRUE, rocksdb::Options::create_if_missing, db, dbname, DestroyDB(), MetaDatabaseName(), ripple::Resource::ok, rocksdb::DB::Open(), and rocksdb::test::TmpDir().

3425  {
 // Verifies that DestroyDB on a database also destroys its meta-database
 // chain (MetaDatabaseName nesting two levels deep): after destroying only
 // the top-level dbname, none of the three databases can be reopened with
 // create_if_missing == false.
3426  std::string dbname = test::TmpDir() + "/db_meta";
3427  std::string metadbname = MetaDatabaseName(dbname, 0);
3428  std::string metametadbname = MetaDatabaseName(metadbname, 0);
3429 
3430  // Destroy previous versions if they exist. Using the long way.
3431  ASSERT_OK(DestroyDB(metametadbname, Options()));
3432  ASSERT_OK(DestroyDB(metadbname, Options()));
3433  ASSERT_OK(DestroyDB(dbname, Options()));
3434 
3435  // Setup databases
3436  Options opts;
3437  opts.create_if_missing = true;
3438  DB* db = nullptr;
3439  ASSERT_OK(DB::Open(opts, dbname, &db));
3440  delete db;
3441  db = nullptr;
3442  ASSERT_OK(DB::Open(opts, metadbname, &db));
3443  delete db;
3444  db = nullptr;
3445  ASSERT_OK(DB::Open(opts, metametadbname, &db));
3446  delete db;
3447  db = nullptr;
3448 
3449  // Delete databases
3450  ASSERT_OK(DestroyDB(dbname, Options()));
3451 
3452  // Check if deletion worked.
3453  opts.create_if_missing = false;
3454  ASSERT_TRUE(!(DB::Open(opts, dbname, &db)).ok());
3455  ASSERT_TRUE(!(DB::Open(opts, metadbname, &db)).ok());
3456  ASSERT_TRUE(!(DB::Open(opts, metametadbname, &db)).ok());
3457 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
NoSpace   
)

Definition at line 3460 of file db_test.cc.

References ASSERT_EQ, ASSERT_GE, ASSERT_LT, ASSERT_OK, rocksdb::Options::env, and env_.

3460  {
 // Simulates out-of-space errors via the SpecialEnv no_space_ flag and
 // checks that repeated failed compactions (a) do not leak files — the file
 // count stays below num_files + 3 — and (b) back off by sleeping at least
 // once per failed attempt (sleep_counter_).
3461  do {
3462  Options options = CurrentOptions();
3463  options.env = env_;
3464  Reopen(&options);
3465 
3466  ASSERT_OK(Put("foo", "v1"));
3467  ASSERT_EQ("v1", Get("foo"));
3468  Compact("a", "z");
3469  const int num_files = CountFiles();
3470  env_->no_space_.Release_Store(env_); // Force out-of-space errors
3471  env_->sleep_counter_.Reset();
3472  for (int i = 0; i < 5; i++) {
3473  for (int level = 0; level < dbfull()->NumberLevels()-1; level++) {
3474  dbfull()->TEST_CompactRange(level, nullptr, nullptr);
3475  }
3476  }
 // Storing nullptr clears the simulated error condition.
3477  env_->no_space_.Release_Store(nullptr);
3478  ASSERT_LT(CountFiles(), num_files + 3);
3479 
3480  // Check that compaction attempts slept after errors
3481  ASSERT_GE(env_->sleep_counter_.Read(), 5);
3482  } while (ChangeCompactOptions());
3483 }
rocksdb::TEST ( DBTest  ,
NonWritableFileSystem   
)

Definition at line 3485 of file db_test.cc.

References ASSERT_GT, ASSERT_OK, rocksdb::Options::env, env_, ripple::Resource::ok, rocksdb::Env::SleepForMicroseconds(), and rocksdb::Options::write_buffer_size.

3485  {
 // Forces new-file creation to fail (SpecialEnv non_writable_) and verifies
 // that, with a tiny 1000-byte write buffer forcing frequent flushes, large
 // puts start failing rather than hanging or corrupting the DB.
3486  do {
3487  Options options = CurrentOptions();
3488  options.write_buffer_size = 1000;
3489  options.env = env_;
3490  Reopen(&options);
3491  ASSERT_OK(Put("foo", "v1"));
3492  env_->non_writable_.Release_Store(env_); // Force errors for new files
3493  std::string big(100000, 'x');
3494  int errors = 0;
 // Each 100KB value overflows the 1000-byte write buffer, triggering a
 // flush that needs a new (unwritable) file; count the resulting failures.
3495  for (int i = 0; i < 20; i++) {
3496  if (!Put("foo", big).ok()) {
3497  errors++;
3498  env_->SleepForMicroseconds(100000);
3499  }
3500  }
3501  ASSERT_GT(errors, 0);
3502  env_->non_writable_.Release_Store(nullptr);
3503  } while (ChangeCompactOptions());
3504 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
ManifestWriteError   
)

Definition at line 3506 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Options::env, env_, and rocksdb::port::AtomicPointer::Release_Store().

3506  {
 // Verifies that a failed MANIFEST write/sync during compaction does not
 // lose data: the compaction fails, reads still succeed, and after clearing
 // the error and reopening, the data is intact.
3507  // Test for the following problem:
3508  // (a) Compaction produces file F
3509  // (b) Log record containing F is written to MANIFEST file, but Sync() fails
3510  // (c) GC deletes F
3511  // (d) After reopening DB, reads fail since deleted F is named in log record
3512 
3513  // We iterate twice. In the second iteration, everything is the
3514  // same except the log record never makes it to the MANIFEST file.
3515  for (int iter = 0; iter < 2; iter++) {
 // iter 0 injects a sync failure, iter 1 a write failure.
3516  port::AtomicPointer* error_type = (iter == 0)
3517  ? &env_->manifest_sync_error_
3518  : &env_->manifest_write_error_;
3519 
3520  // Insert foo=>bar mapping
3521  Options options = CurrentOptions();
3522  options.env = env_;
3523  options.create_if_missing = true;
3524  options.error_if_exists = false;
3525  DestroyAndReopen(&options);
3526  ASSERT_OK(Put("foo", "bar"));
3527  ASSERT_EQ("bar", Get("foo"));
3528 
3529  // Memtable compaction (will succeed)
3530  dbfull()->TEST_FlushMemTable();
3531  ASSERT_EQ("bar", Get("foo"));
3532  const int last = dbfull()->MaxMemCompactionLevel();
3533  ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level
3534 
3535  // Merging compaction (will fail)
3536  error_type->Release_Store(env_);
3537  dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail
3538  ASSERT_EQ("bar", Get("foo"));
3539 
3540  // Recovery: should not lose data
3541  error_type->Release_Store(nullptr);
3542  Reopen(&options);
3543  ASSERT_EQ("bar", Get("foo"));
3544  }
3545 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
PutFailsParanoid   
)

Definition at line 3547 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, rocksdb::Options::create_if_missing, rocksdb::Options::env, env_, rocksdb::Options::error_if_exists, rocksdb::Status::ok(), and rocksdb::Options::paranoid_checks.

3547  {
3548  // Test the following:
3549  // (a) A random put fails in paranoid mode (simulate by sync fail)
3550  // (b) All other puts have to fail, even if writes would succeed
3551  // (c) All of that should happen ONLY if paranoid_checks = true
3552 
3553  Options options = CurrentOptions();
3554  options.env = env_;
3555  options.create_if_missing = true;
3556  options.error_if_exists = false;
3557  options.paranoid_checks = true;
3558  DestroyAndReopen(&options);
3559  Status s;
3560 
3561  ASSERT_OK(Put("foo", "bar"));
3562  ASSERT_OK(Put("foo1", "bar1"));
3563  // simulate error
3564  env_->log_write_error_.Release_Store(env_);
3565  s = Put("foo2", "bar2");
3566  ASSERT_TRUE(!s.ok());
3567  env_->log_write_error_.Release_Store(nullptr);
3568  s = Put("foo3", "bar3");
3569  // the next put should fail, too
 // With paranoid_checks the DB stays in an error state even after the
 // injected failure is cleared...
3570  ASSERT_TRUE(!s.ok());
3571  // but we're still able to read
3572  ASSERT_EQ("bar", Get("foo"));
3573 
3574  // do the same thing with paranoid checks off
3575  options.paranoid_checks = false;
3576  DestroyAndReopen(&options);
3577 
3578  ASSERT_OK(Put("foo", "bar"));
3579  ASSERT_OK(Put("foo1", "bar1"));
3580  // simulate error
3581  env_->log_write_error_.Release_Store(env_);
3582  s = Put("foo2", "bar2");
3583  ASSERT_TRUE(!s.ok());
3584  env_->log_write_error_.Release_Store(nullptr);
3585  s = Put("foo3", "bar3");
3586  // the next put should NOT fail
 // ...whereas without paranoid_checks, writes resume once the error clears.
3587  ASSERT_TRUE(s.ok());
3588 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
FilesDeletedAfterCompaction   
)

Definition at line 3590 of file db_test.cc.

References ASSERT_EQ, and ASSERT_OK.

3590  {
 // Verifies that repeated overwrite-then-compact cycles do not accumulate
 // live files: after 10 cycles the live-file count equals the count after
 // the first cycle, i.e. obsolete files are deleted post-compaction.
3591  do {
3592  ASSERT_OK(Put("foo", "v2"));
3593  Compact("a", "z");
3594  const int num_files = CountLiveFiles();
3595  for (int i = 0; i < 10; i++) {
3596  ASSERT_OK(Put("foo", "v2"));
3597  Compact("a", "z");
3598  }
3599  ASSERT_EQ(CountLiveFiles(), num_files);
3600  } while (ChangeCompactOptions());
3601 }
rocksdb::TEST ( DBTest  ,
BloomFilter   
)

Definition at line 3603 of file db_test.cc.

References ASSERT_EQ, ASSERT_GE, ASSERT_LE, ASSERT_OK, rocksdb::Options::env, env_, rocksdb::Options::filter_policy, NewBloomFilterPolicy(), and rocksdb::Options::no_block_cache.

3603  {
 // Verifies bloom-filter effectiveness with block cache disabled: reads of
 // present keys touch roughly one sstable each, and reads of missing keys
 // are almost entirely filtered out (<= 3% of N random reads).
3604  do {
3605  env_->count_random_reads_ = true;
3606  Options options = CurrentOptions();
3607  options.env = env_;
3608  options.no_block_cache = true;
3609  options.filter_policy = NewBloomFilterPolicy(10);
3610  Reopen(&options);
3611 
3612  // Populate multiple layers
3613  const int N = 10000;
3614  for (int i = 0; i < N; i++) {
3615  ASSERT_OK(Put(Key(i), Key(i)));
3616  }
3617  Compact("a", "z");
 // Overwrite every 100th key so a second, smaller sstable exists.
3618  for (int i = 0; i < N; i += 100) {
3619  ASSERT_OK(Put(Key(i), Key(i)));
3620  }
3621  dbfull()->TEST_FlushMemTable();
3622 
3623  // Prevent auto compactions triggered by seeks
3624  env_->delay_sstable_sync_.Release_Store(env_);
3625 
3626  // Lookup present keys. Should rarely read from small sstable.
3627  env_->random_read_counter_.Reset();
3628  for (int i = 0; i < N; i++) {
3629  ASSERT_EQ(Key(i), Get(Key(i)));
3630  }
3631  int reads = env_->random_read_counter_.Read();
3632  fprintf(stderr, "%d present => %d reads\n", N, reads);
3633  ASSERT_GE(reads, N);
3634  ASSERT_LE(reads, N + 2*N/100);
3635 
3636  // Lookup missing keys. Should rarely read from either sstable.
3637  env_->random_read_counter_.Reset();
3638  for (int i = 0; i < N; i++) {
3639  ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing"));
3640  }
3641  reads = env_->random_read_counter_.Read();
3642  fprintf(stderr, "%d missing => %d reads\n", N, reads);
3643  ASSERT_LE(reads, 3*N/100);
3644 
3645  env_->delay_sstable_sync_.Release_Store(nullptr);
3646  Close();
 // filter_policy is owned by the test, not the DB; free it after Close().
3647  delete options.filter_policy;
3648  } while (ChangeCompactOptions());
3649 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
SnapshotFiles   
)

Definition at line 3651 of file db_test.cc.

References ASSERT_EQ, ASSERT_GE, ASSERT_GT, ASSERT_OK, ripple::compare(), rocksdb::Options::create_if_missing, dbname_, rocksdb::DB::DisableFileDeletions(), env_, rocksdb::DB::Get(), rocksdb::Env::GetFileSize(), kDescriptorFile, ripple::min(), rocksdb::Env::NewSequentialFile(), rocksdb::Env::NewWritableFile(), rocksdb::DB::Open(), ParseFileName(), RandomString(), rocksdb::SequentialFile::Read(), rocksdb::Slice::size(), and rocksdb::Options::write_buffer_size.

3651  {
 // Verifies GetLiveFiles-based backup: copy the live files (truncating the
 // MANIFEST to its reported valid size) into a snapshot directory, mutate
 // the original DB, then open the snapshot copy and check it still serves
 // the pre-mutation values. Also checks the MANIFEST grows after new writes.
3652  do {
3653  Options options = CurrentOptions();
3654  const EnvOptions soptions;
3655  options.write_buffer_size = 100000000; // Large write buffer
3656  Reopen(&options);
3657 
3658  Random rnd(301);
3659 
3660  // Write 8MB (80 values, each 100K)
3661  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
3662  std::vector<std::string> values;
3663  for (int i = 0; i < 80; i++) {
3664  values.push_back(RandomString(&rnd, 100000));
3665  ASSERT_OK(Put(Key(i), values[i]));
3666  }
3667 
3668  // assert that nothing makes it to disk yet.
3669  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
3670 
3671  // get a file snapshot
3672  uint64_t manifest_number = 0;
3673  uint64_t manifest_size = 0;
3674  std::vector<std::string> files;
3675  dbfull()->DisableFileDeletions();
3676  dbfull()->GetLiveFiles(files, &manifest_size);
3677 
3678  // CURRENT, MANIFEST, *.sst files
3679  ASSERT_EQ(files.size(), 3U);
3680 
3681  uint64_t number = 0;
3682  FileType type;
3683 
3684  // copy these files to a new snapshot directory
3685  std::string snapdir = dbname_ + ".snapdir/";
3686  std::string mkdir = "mkdir -p " + snapdir;
3687  ASSERT_EQ(system(mkdir.c_str()), 0);
3688 
3689  for (unsigned int i = 0; i < files.size(); i++) {
3690  // our clients require that GetLiveFiles returns
3691  // files with "/" as first character!
3692  ASSERT_EQ(files[i][0], '/');
3693  std::string src = dbname_ + files[i];
3694  std::string dest = snapdir + files[i];
3695 
3696  uint64_t size;
3697  ASSERT_OK(env_->GetFileSize(src, &size));
3698 
3699  // record the number and the size of the
3700  // latest manifest file
3701  if (ParseFileName(files[i].substr(1), &number, &type)) {
3702  if (type == kDescriptorFile) {
3703  if (number > manifest_number) {
3704  manifest_number = number;
3705  ASSERT_GE(size, manifest_size);
3706  size = manifest_size; // copy only valid MANIFEST data
3707  }
3708  }
3709  }
 // Byte-for-byte copy of src into dest in 4KB chunks.
3710  unique_ptr<SequentialFile> srcfile;
3711  ASSERT_OK(env_->NewSequentialFile(src, &srcfile, soptions));
3712  unique_ptr<WritableFile> destfile;
3713  ASSERT_OK(env_->NewWritableFile(dest, &destfile, soptions));
3714 
3715  char buffer[4096];
3716  Slice slice;
3717  while (size > 0) {
3718  uint64_t one = std::min(uint64_t(sizeof(buffer)), size);
3719  ASSERT_OK(srcfile->Read(one, &slice, buffer));
3720  ASSERT_OK(destfile->Append(slice));
3721  size -= slice.size();
3722  }
3723  ASSERT_OK(destfile->Close());
3724  }
3725 
3726  // release file snapshot
 // NOTE(review): this calls DisableFileDeletions() again although the
 // comment says "release" — presumably EnableFileDeletions() was intended;
 // confirm against db.h / upstream db_test.cc.
3727  dbfull()->DisableFileDeletions();
3728 
3729  // overwrite one key, this key should not appear in the snapshot
3730  std::vector<std::string> extras;
3731  for (unsigned int i = 0; i < 1; i++) {
3732  extras.push_back(RandomString(&rnd, 100000));
3733  ASSERT_OK(Put(Key(i), extras[i]));
3734  }
3735 
3736  // verify that data in the snapshot are correct
3737  Options opts;
3738  DB* snapdb;
3739  opts.create_if_missing = false;
3740  Status stat = DB::Open(opts, snapdir, &snapdb);
3741  ASSERT_OK(stat);
3742 
3743  ReadOptions roptions;
3744  std::string val;
3745  for (unsigned int i = 0; i < 80; i++) {
3746  stat = snapdb->Get(roptions, Key(i), &val);
3747  ASSERT_EQ(values[i].compare(val), 0);
3748  }
3749  delete snapdb;
3750 
3751  // look at the new live files after we added an 'extra' key
3752  // and after we took the first snapshot.
3753  uint64_t new_manifest_number = 0;
3754  uint64_t new_manifest_size = 0;
3755  std::vector<std::string> newfiles;
3756  dbfull()->DisableFileDeletions();
3757  dbfull()->GetLiveFiles(newfiles, &new_manifest_size);
3758 
3759  // find the new manifest file. assert that this manifest file is
3760  // the same one as in the previous snapshot. But its size should be
3761  // larger because we added an extra key after taking the
3762  // previous snapshot.
3763  for (unsigned int i = 0; i < newfiles.size(); i++) {
3764  std::string src = dbname_ + "/" + newfiles[i];
3765  // record the lognumber and the size of the
3766  // latest manifest file
3767  if (ParseFileName(newfiles[i].substr(1), &number, &type)) {
3768  if (type == kDescriptorFile) {
3769  if (number > new_manifest_number) {
3770  uint64_t size;
3771  new_manifest_number = number;
3772  ASSERT_OK(env_->GetFileSize(src, &size));
3773  ASSERT_GE(size, new_manifest_size);
3774  }
3775  }
3776  }
3777  }
3778  ASSERT_EQ(manifest_number, new_manifest_number);
3779  ASSERT_GT(new_manifest_size, manifest_size);
3780 
3781  // release file snapshot
 // NOTE(review): same as above — DisableFileDeletions() where the comment
 // says "release"; verify intent.
3782  dbfull()->DisableFileDeletions();
3783  } while (ChangeCompactOptions());
3784 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
CompactOnFlush   
)

Definition at line 3786 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, db_, rocksdb::Options::disable_auto_compactions, rocksdb::DBImpl::GetSnapshot(), rocksdb::Options::purge_redundant_kvs_while_flush, and rocksdb::DBImpl::ReleaseSnapshot().

3786  {
 // With purge_redundant_kvs_while_flush enabled and auto-compactions off,
 // checks which entry sequences for one key survive a memtable flush and a
 // full compaction (AllEntriesFor lists the key's entries newest-first).
3787  do {
3788  Options options = CurrentOptions();
3789  options.purge_redundant_kvs_while_flush = true;
3790  options.disable_auto_compactions = true;
3791  Reopen(&options);
3792 
3793  Put("foo", "v1");
3794  ASSERT_OK(dbfull()->TEST_FlushMemTable());
3795  ASSERT_EQ(AllEntriesFor("foo"), "[ v1 ]");
3796 
3797  // Write two new keys
3798  Put("a", "begin");
3799  Put("z", "end");
3800  dbfull()->TEST_FlushMemTable();
3801 
3802  // Case1: Delete followed by a put
3803  Delete("foo");
3804  Put("foo", "v2");
3805  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
3806 
3807  // After the current memtable is flushed, the DEL should
3808  // have been removed
3809  ASSERT_OK(dbfull()->TEST_FlushMemTable());
3810  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
3811 
3812  dbfull()->CompactRange(nullptr, nullptr);
3813  ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
3814 
3815  // Case 2: Delete followed by another delete
3816  Delete("foo");
3817  Delete("foo");
3818  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, DEL, v2 ]");
3819  ASSERT_OK(dbfull()->TEST_FlushMemTable());
3820  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v2 ]");
3821  dbfull()->CompactRange(nullptr, nullptr);
3822  ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
3823 
3824  // Case 3: Put followed by a delete
3825  Put("foo", "v3");
3826  Delete("foo");
3827  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v3 ]");
3828  ASSERT_OK(dbfull()->TEST_FlushMemTable());
3829  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL ]");
3830  dbfull()->CompactRange(nullptr, nullptr);
3831  ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
3832 
3833  // Case 4: Put followed by another Put
3834  Put("foo", "v4");
3835  Put("foo", "v5");
3836  ASSERT_EQ(AllEntriesFor("foo"), "[ v5, v4 ]");
3837  ASSERT_OK(dbfull()->TEST_FlushMemTable());
3838  ASSERT_EQ(AllEntriesFor("foo"), "[ v5 ]");
3839  dbfull()->CompactRange(nullptr, nullptr);
3840  ASSERT_EQ(AllEntriesFor("foo"), "[ v5 ]");
3841 
3842  // clear database
3843  Delete("foo");
3844  dbfull()->CompactRange(nullptr, nullptr);
3845  ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
3846 
3847  // Case 5: Put followed by snapshot followed by another Put
3848  // Both puts should remain.
3849  Put("foo", "v6");
3850  const Snapshot* snapshot = db_->GetSnapshot();
3851  Put("foo", "v7");
3852  ASSERT_OK(dbfull()->TEST_FlushMemTable());
3853  ASSERT_EQ(AllEntriesFor("foo"), "[ v7, v6 ]");
3854  db_->ReleaseSnapshot(snapshot);
3855 
3856  // clear database
3857  Delete("foo");
3858  dbfull()->CompactRange(nullptr, nullptr);
3859  ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
3860 
3861  // Case 6: snapshot followed by a put followed by another Put
3862  // Only the last put should remain.
3863  const Snapshot* snapshot1 = db_->GetSnapshot();
3864  Put("foo", "v8");
3865  Put("foo", "v9");
3866  ASSERT_OK(dbfull()->TEST_FlushMemTable());
3867  ASSERT_EQ(AllEntriesFor("foo"), "[ v9 ]");
3868  db_->ReleaseSnapshot(snapshot1);
3869  } while (ChangeCompactOptions());
3870 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
WALArchivalTtl   
)

Definition at line 3888 of file db_test.cc.

References ArchivalDirectory(), ASSERT_OK, ASSERT_TRUE, rocksdb::Options::create_if_missing, dbname_, env_, ListLogFiles(), rocksdb::Env::SleepForMicroseconds(), and rocksdb::Options::WAL_ttl_seconds.

3888  {
3889  do {
3890  Options options = CurrentOptions();
3891  options.create_if_missing = true;
3892  options.WAL_ttl_seconds = 1000;
3893  DestroyAndReopen(&options);
3894 
3895  // TEST : Create DB with a ttl and no size limit.
3896  // Put some keys. Count the log files present in the DB just after insert.
3897  // Re-open db. Causes deletion/archival to take place.
3898  // Assert that the files moved under "/archive".
3899  // Reopen db with small ttl.
3900  // Assert that archive was removed.
3901 
3902  std::string archiveDir = ArchivalDirectory(dbname_);
3903 
3904  for (int i = 0; i < 10; ++i) {
3905  for (int j = 0; j < 10; ++j) {
3906  ASSERT_OK(Put(Key(10 * i + j), DummyString(1024)));
3907  }
3908 
 // Snapshot the WAL file list before the reopen archives them.
3909  std::vector<uint64_t> log_files = ListLogFiles(env_, dbname_);
3910 
3911  options.create_if_missing = false;
3912  Reopen(&options);
3913 
3914  std::vector<uint64_t> logs = ListLogFiles(env_, archiveDir);
3915  std::set<uint64_t> archivedFiles(logs.begin(), logs.end());
3916 
 // Every pre-reopen WAL must now be present in the archive directory.
3917  for (auto& log : log_files) {
3918  ASSERT_TRUE(archivedFiles.find(log) != archivedFiles.end());
3919  }
3920  }
3921 
3922  std::vector<uint64_t> log_files = ListLogFiles(env_, archiveDir);
3923  ASSERT_TRUE(log_files.size() > 0);
3924 
 // Shrink the TTL to 1s, wait 2s, and reopen: the archive must be purged.
3925  options.WAL_ttl_seconds = 1;
3926  env_->SleepForMicroseconds(2 * 1000 * 1000);
3927  Reopen(&options);
3928 
3929  log_files = ListLogFiles(env_, archiveDir);
3930  ASSERT_TRUE(log_files.empty());
3931  } while (ChangeCompactOptions());
3932 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
WALArchivalSizeLimit   
)

Definition at line 3951 of file db_test.cc.

References ArchivalDirectory(), ASSERT_OK, ASSERT_TRUE, rocksdb::Options::create_if_missing, dbname_, env_, GetLogDirSize(), ListLogFiles(), rocksdb::Env::SleepForMicroseconds(), rocksdb::Options::WAL_size_limit_MB, and rocksdb::Options::WAL_ttl_seconds.

3951  {
3952  do {
3953  Options options = CurrentOptions();
3954  options.create_if_missing = true;
3955  options.WAL_ttl_seconds = 0;
3956  options.WAL_size_limit_MB = 1000;
3957 
3958  // TEST : Create DB with huge size limit and no ttl.
3959  // Put some keys. Count the archived log files present in the DB
3960  // just after insert. Assert that there are enough of them.
3961  // Change size limit. Re-open db.
3962  // Assert that archive is not greater than WAL_size_limit_MB.
3963  // Set ttl and time_to_check_ to small values. Re-open db.
3964  // Assert that there are no archived logs left.
3965 
3966  DestroyAndReopen(&options);
3967  for (int i = 0; i < 128 * 128; ++i) {
3968  ASSERT_OK(Put(Key(i), DummyString(1024)));
3969  }
3970  Reopen(&options);
3971 
3972  std::string archive_dir = ArchivalDirectory(dbname_);
3973  std::vector<std::uint64_t> log_files = ListLogFiles(env_, archive_dir);
3974  ASSERT_TRUE(log_files.size() > 2);
3975 
 // Lower the size limit and force a purge: the archive must shrink to fit.
3976  options.WAL_size_limit_MB = 8;
3977  Reopen(&options);
3978  dbfull()->TEST_PurgeObsoleteteWAL();
3979 
3980  uint64_t archive_size = GetLogDirSize(archive_dir, env_);
3981  ASSERT_TRUE(archive_size <= options.WAL_size_limit_MB * 1024 * 1024);
3982 
3983  options.WAL_ttl_seconds = 1;
3984  dbfull()->TEST_SetDefaultTimeToCheck(1);
3985  env_->SleepForMicroseconds(2 * 1000 * 1000);
3986  Reopen(&options);
3987  dbfull()->TEST_PurgeObsoleteteWAL();
3988 
3989  log_files = ListLogFiles(env_, archive_dir);
3990  ASSERT_TRUE(log_files.empty());
3991  } while (ChangeCompactOptions());
3992 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
TransactionLogIterator   
)

Definition at line 4019 of file db_test.cc.

References ASSERT_EQ, env_, ExpectRecords(), and rocksdb::Env::SleepForMicroseconds().

4019  {
 // Basic GetUpdatesSince coverage: three puts yield three records from a
 // transaction-log iterator started at sequence 0, and after a reopen plus
 // three more puts the iterator sees all six.
4020  do {
4021  Options options = OptionsForLogIterTest();
4022  DestroyAndReopen(&options);
4023  Put("key1", DummyString(1024));
4024  Put("key2", DummyString(1024));
4025  Put("key2", DummyString(1024));
4026  ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3U);
4027  {
4028  auto iter = OpenTransactionLogIter(0);
4029  ExpectRecords(3, iter);
4030  }
4031  Reopen(&options);
 // (Listing artifact: the sleep and the following scope share one source
 // line in the original file.)
4032  env_->SleepForMicroseconds(2 * 1000 * 1000);{
4033  Put("key4", DummyString(1024));
4034  Put("key5", DummyString(1024));
4035  Put("key6", DummyString(1024));
4036  }
4037  {
4038  auto iter = OpenTransactionLogIter(0);
4039  ExpectRecords(6, iter);
4040  }
4041  } while (ChangeCompactOptions());
4042 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
TransactionLogIteratorMoveOverZeroFiles   
)

Definition at line 4044 of file db_test.cc.

References ExpectRecords().

4044  {
 // A reopen with no intervening writes creates an empty (zero-record) WAL
 // file; the iterator must skip over it and still return both real records.
4045  do {
4046  Options options = OptionsForLogIterTest();
4047  DestroyAndReopen(&options);
4048  // Do a plain Reopen.
4049  Put("key1", DummyString(1024));
4050  // Two reopens should create a zero record WAL file.
4051  Reopen(&options);
4052  Reopen(&options);
4053 
4054  Put("key2", DummyString(1024));
4055 
4056  auto iter = OpenTransactionLogIter(0);
4057  ExpectRecords(2, iter);
4058  } while (ChangeCompactOptions());
4059 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
TransactionLogIteratorJustEmptyFile   
)

Definition at line 4083 of file db_test.cc.

References ASSERT_TRUE, and rocksdb::TransactionLogIterator::Valid().

4083  {
 // On a freshly created DB with no writes, GetUpdatesSince(0) must return
 // an iterator that is immediately invalid.
4084  do {
4085  Options options = OptionsForLogIterTest();
4086  DestroyAndReopen(&options);
4087  unique_ptr<TransactionLogIterator> iter;
4088  Status status = dbfull()->GetUpdatesSince(0, &iter);
4089  // Check that an empty iterator is returned
 // NOTE(review): `status` is never asserted on — an ASSERT_OK(status)
 // before dereferencing iter would make the failure mode clearer; confirm
 // GetUpdatesSince always populates iter even on non-OK status.
4090  ASSERT_TRUE(!iter->Valid());
4091  } while (ChangeCompactOptions());
4092 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
TransactionLogIteratorCheckAfterRestart   
)

Definition at line 4094 of file db_test.cc.

References ExpectRecords().

4094  {
 // Records written before a flush and a restart must still be visible to a
 // transaction-log iterator opened after the reopen.
4095  do {
4096  Options options = OptionsForLogIterTest();
4097  DestroyAndReopen(&options);
4098  Put("key1", DummyString(1024));
4099  Put("key2", DummyString(1023));
4100  dbfull()->Flush(FlushOptions());
4101  Reopen(&options);
4102  auto iter = OpenTransactionLogIter(0);
4103  ExpectRecords(2, iter);
4104  } while (ChangeCompactOptions());
4105 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
TransactionLogIteratorCorruptedLog   
)

Definition at line 4107 of file db_test.cc.

References ASSERT_EQ, ASSERT_LT, ASSERT_OK, dbname_, ExpectRecords(), ReadRecords(), and std::to_string().

4107  {
4108  do {
4109  Options options = OptionsForLogIterTest();
4110  DestroyAndReopen(&options);
4111  for (int i = 0; i < 1024; i++) {
4112  Put("key"+std::to_string(i), DummyString(10));
4113  }
4114  dbfull()->Flush(FlushOptions());
4115  // Corrupt this log to create a gap
4116  rocksdb::VectorLogPtr wal_files;
4117  ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files));
4118  const auto logfilePath = dbname_ + "/" + wal_files.front()->PathName();
4119  ASSERT_EQ(
4120  0,
4121  truncate(logfilePath.c_str(), wal_files.front()->SizeFileBytes() / 2));
4122  // Insert a new entry to a new log file
4123  Put("key1025", DummyString(10));
4124  // Try to read from the beginning. Should stop before the gap and read less
4125  // than 1025 entries
4126  auto iter = OpenTransactionLogIter(0);
4127  int count;
4128  int last_sequence_read = ReadRecords(iter, count);
4129  ASSERT_LT(last_sequence_read, 1025);
4130  // Try to read past the gap, should be able to seek to key1025
4131  auto iter2 = OpenTransactionLogIter(last_sequence_read + 1);
4132  ExpectRecords(1, iter2);
4133  } while (ChangeCompactOptions());
4134 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
TransactionLogIteratorBatchOperations   
)

Definition at line 4136 of file db_test.cc.

References rocksdb::WriteBatch::Delete(), ExpectRecords(), and rocksdb::WriteBatch::Put().

4136  {
4137  do {
4138  Options options = OptionsForLogIterTest();
4139  DestroyAndReopen(&options);
4140  WriteBatch batch;
4141  batch.Put("key1", DummyString(1024));
4142  batch.Put("key2", DummyString(1024));
4143  batch.Put("key3", DummyString(1024));
4144  batch.Delete("key2");
4145  dbfull()->Write(WriteOptions(), &batch);
4146  dbfull()->Flush(FlushOptions());
4147  Reopen(&options);
4148  Put("key4", DummyString(1024));
4149  auto iter = OpenTransactionLogIter(3);
4150  ExpectRecords(2, iter);
4151  } while (ChangeCompactOptions());
4152 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
TransactionLogIteratorBlobs   
)

Definition at line 4154 of file db_test.cc.

References ASSERT_EQ, rocksdb::WriteBatch::Delete(), rocksdb::WriteBatch::Put(), rocksdb::WriteBatch::PutLogData(), seen, rocksdb::Slice::size(), std::to_string(), rocksdb::Slice::ToString(), and value.

4154  {
4155  Options options = OptionsForLogIterTest();
4156  DestroyAndReopen(&options);
4157  {
4158  WriteBatch batch;
4159  batch.Put("key1", DummyString(1024));
4160  batch.Put("key2", DummyString(1024));
4161  batch.PutLogData(Slice("blob1"));
4162  batch.Put("key3", DummyString(1024));
4163  batch.PutLogData(Slice("blob2"));
4164  batch.Delete("key2");
4165  dbfull()->Write(WriteOptions(), &batch);
4166  Reopen(&options);
4167  }
4168 
4169  auto res = OpenTransactionLogIter(0)->GetBatch();
4170  struct Handler : public WriteBatch::Handler {
4171  std::string seen;
4172  virtual void Put(const Slice& key, const Slice& value) {
4173  seen += "Put(" + key.ToString() + ", " + std::to_string(value.size()) +
4174  ")";
4175  }
4176  virtual void Merge(const Slice& key, const Slice& value) {
4177  seen += "Merge(" + key.ToString() + ", " + std::to_string(value.size()) +
4178  ")";
4179  }
4180  virtual void LogData(const Slice& blob) {
4181  seen += "LogData(" + blob.ToString() + ")";
4182  }
4183  virtual void Delete(const Slice& key) {
4184  seen += "Delete(" + key.ToString() + ")";
4185  }
4186  } handler;
4187  res.writeBatchPtr->Iterate(&handler);
4188  ASSERT_EQ("Put(key1, 1024)"
4189  "Put(key2, 1024)"
4190  "LogData(blob1)"
4191  "Put(key3, 1024)"
4192  "LogData(blob2)"
4193  "Delete(key2)", handler.seen);
4194 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
ReadCompaction   
)

Definition at line 4196 of file db_test.cc.

References ASSERT_EQ, ASSERT_NE, ASSERT_OK, ASSERT_TRUE, rocksdb::Options::block_size, rocksdb::Options::create_if_missing, env_, rocksdb::Options::filter_policy, rocksdb::Options::max_open_files, rocksdb::Options::no_block_cache, rocksdb::Env::SleepForMicroseconds(), rocksdb::Options::target_file_size_base, value, and rocksdb::Options::write_buffer_size.

4196  {
4197  std::string value(4096, '4'); // a string of size 4K
4198  {
4199  Options options = CurrentOptions();
4200  options.create_if_missing = true;
4201  options.max_open_files = 20; // only 10 file in file-cache
4202  options.target_file_size_base = 512;
4203  options.write_buffer_size = 64 * 1024;
4204  options.filter_policy = nullptr;
4205  options.block_size = 4096;
4206  options.no_block_cache = true;
4207 
4208  Reopen(&options);
4209 
4210  // Write 8MB (2000 values, each 4K)
4211  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
4212  std::vector<std::string> values;
4213  for (int i = 0; i < 2000; i++) {
4214  ASSERT_OK(Put(Key(i), value));
4215  }
4216 
4217  // clear level 0 and 1 if necessary.
4218  dbfull()->TEST_FlushMemTable();
4219  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
4220  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
4221  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
4222  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
4223 
4224  // write some new keys into level 0
4225  for (int i = 0; i < 2000; i = i + 16) {
4226  ASSERT_OK(Put(Key(i), value));
4227  }
4228  dbfull()->Flush(FlushOptions());
4229 
4230  // Wait for any write compaction to finish
4231  dbfull()->TEST_WaitForCompact();
4232 
4233  // remember number of files in each level
4234  int l1 = NumTableFilesAtLevel(0);
4235  int l2 = NumTableFilesAtLevel(1);
4236  int l3 = NumTableFilesAtLevel(3);
4237  ASSERT_NE(NumTableFilesAtLevel(0), 0);
4238  ASSERT_NE(NumTableFilesAtLevel(1), 0);
4239  ASSERT_NE(NumTableFilesAtLevel(2), 0);
4240 
4241  // read a bunch of times, trigger read compaction
4242  for (int j = 0; j < 100; j++) {
4243  for (int i = 0; i < 2000; i++) {
4244  Get(Key(i));
4245  }
4246  }
4247  // wait for read compaction to finish
4248  env_->SleepForMicroseconds(1000000);
4249 
4250  // verify that the number of files have decreased
4251  // in some level, indicating that there was a compaction
4252  ASSERT_TRUE(NumTableFilesAtLevel(0) < l1 ||
4253  NumTableFilesAtLevel(1) < l2 ||
4254  NumTableFilesAtLevel(2) < l3);
4255  }
4256 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
MultiThreaded   
)

Definition at line 4324 of file db_test.cc.

References env_, id, rocksdb::Env::SleepForMicroseconds(), and rocksdb::Env::StartThread().

4324  {
4325  do {
4326  // Initialize state
4327  MTState mt;
4328  mt.test = this;
4329  mt.stop.Release_Store(0);
4330  for (int id = 0; id < kNumThreads; id++) {
4331  mt.counter[id].Release_Store(0);
4332  mt.thread_done[id].Release_Store(0);
4333  }
4334 
4335  // Start threads
4336  MTThread thread[kNumThreads];
4337  for (int id = 0; id < kNumThreads; id++) {
4338  thread[id].state = &mt;
4339  thread[id].id = id;
4340  env_->StartThread(MTThreadBody, &thread[id]);
4341  }
4342 
4343  // Let them run for a while
4344  env_->SleepForMicroseconds(kTestSeconds * 1000000);
4345 
4346  // Stop the threads and wait for them to finish
4347  mt.stop.Release_Store(&mt);
4348  for (int id = 0; id < kNumThreads; id++) {
4349  while (mt.thread_done[id].Acquire_Load() == nullptr) {
4350  env_->SleepForMicroseconds(100000);
4351  }
4352  }
4353  } while (ChangeOptions());
4354 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
Randomized   
)

Definition at line 4589 of file db_test.cc.

References ASSERT_OK, ASSERT_TRUE, CompareIterators(), db_, rocksdb::DBImpl::Delete(), rocksdb::WriteBatch::Delete(), rocksdb::ModelDB::Delete(), rocksdb::DBImpl::GetSnapshot(), rocksdb::ModelDB::GetSnapshot(), rocksdb::Random::OneIn(), rocksdb::DBImpl::Put(), rocksdb::WriteBatch::Put(), rocksdb::ModelDB::Put(), RandomKey(), rocksdb::test::RandomSeed(), RandomString(), rocksdb::DBImpl::ReleaseSnapshot(), rocksdb::ModelDB::ReleaseSnapshot(), rocksdb::Random::Uniform(), rocksdb::DBImpl::Write(), and rocksdb::ModelDB::Write().

4589  {
4590  Random rnd(test::RandomSeed());
4591  do {
4592  ModelDB model(CurrentOptions());
4593  const int N = 10000;
4594  const Snapshot* model_snap = nullptr;
4595  const Snapshot* db_snap = nullptr;
4596  std::string k, v;
4597  for (int step = 0; step < N; step++) {
4598  // TODO(sanjay): Test Get() works
4599  int p = rnd.Uniform(100);
4600  int minimum = 0;
4601  if (option_config_ == kPrefixHashRep) {
4602  minimum = 1;
4603  }
4604  if (p < 45) { // Put
4605  k = RandomKey(&rnd, minimum);
4606  v = RandomString(&rnd,
4607  rnd.OneIn(20)
4608  ? 100 + rnd.Uniform(100)
4609  : rnd.Uniform(8));
4610  ASSERT_OK(model.Put(WriteOptions(), k, v));
4611  ASSERT_OK(db_->Put(WriteOptions(), k, v));
4612 
4613  } else if (p < 90) { // Delete
4614  k = RandomKey(&rnd, minimum);
4615  ASSERT_OK(model.Delete(WriteOptions(), k));
4616  ASSERT_OK(db_->Delete(WriteOptions(), k));
4617 
4618 
4619  } else { // Multi-element batch
4620  WriteBatch b;
4621  const int num = rnd.Uniform(8);
4622  for (int i = 0; i < num; i++) {
4623  if (i == 0 || !rnd.OneIn(10)) {
4624  k = RandomKey(&rnd, minimum);
4625  } else {
4626  // Periodically re-use the same key from the previous iter, so
4627  // we have multiple entries in the write batch for the same key
4628  }
4629  if (rnd.OneIn(2)) {
4630  v = RandomString(&rnd, rnd.Uniform(10));
4631  b.Put(k, v);
4632  } else {
4633  b.Delete(k);
4634  }
4635  }
4636  ASSERT_OK(model.Write(WriteOptions(), &b));
4637  ASSERT_OK(db_->Write(WriteOptions(), &b));
4638  }
4639 
4640  if ((step % 100) == 0) {
4641  ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
4642  ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
4643  // Save a snapshot from each DB this time that we'll use next
4644  // time we compare things, to make sure the current state is
4645  // preserved with the snapshot
4646  if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
4647  if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
4648 
4649  Reopen();
4650  ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
4651 
4652  model_snap = model.GetSnapshot();
4653  db_snap = db_->GetSnapshot();
4654  }
4655  }
4656  if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
4657  if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
4658  } while (ChangeOptions(kSkipDeletesFilterFirst));
4659 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
MultiGetSimple   
)

Definition at line 4661 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, ASSERT_TRUE, db_, rocksdb::DBImpl::Delete(), rocksdb::DBImpl::MultiGet(), and rocksdb::DBImpl::Put().

4661  {
4662  do {
4663  ASSERT_OK(db_->Put(WriteOptions(),"k1","v1"));
4664  ASSERT_OK(db_->Put(WriteOptions(),"k2","v2"));
4665  ASSERT_OK(db_->Put(WriteOptions(),"k3","v3"));
4666  ASSERT_OK(db_->Put(WriteOptions(),"k4","v4"));
4667  ASSERT_OK(db_->Delete(WriteOptions(),"k4"));
4668  ASSERT_OK(db_->Put(WriteOptions(),"k5","v5"));
4669  ASSERT_OK(db_->Delete(WriteOptions(),"no_key"));
4670 
4671  std::vector<Slice> keys(6);
4672  keys[0] = "k1";
4673  keys[1] = "k2";
4674  keys[2] = "k3";
4675  keys[3] = "k4";
4676  keys[4] = "k5";
4677  keys[5] = "no_key";
4678 
4679  std::vector<std::string> values(20,"Temporary data to be overwritten");
4680 
4681  std::vector<Status> s = db_->MultiGet(ReadOptions(),keys,&values);
4682  ASSERT_EQ(values.size(),keys.size());
4683  ASSERT_EQ(values[0], "v1");
4684  ASSERT_EQ(values[1], "v2");
4685  ASSERT_EQ(values[2], "v3");
4686  ASSERT_EQ(values[4], "v5");
4687 
4688  ASSERT_OK(s[0]);
4689  ASSERT_OK(s[1]);
4690  ASSERT_OK(s[2]);
4691  ASSERT_TRUE(s[3].IsNotFound());
4692  ASSERT_OK(s[4]);
4693  ASSERT_TRUE(s[5].IsNotFound());
4694  } while (ChangeCompactOptions());
4695 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
MultiGetEmpty   
)

Definition at line 4697 of file db_test.cc.

References ASSERT_EQ, ASSERT_TRUE, db_, and rocksdb::DBImpl::MultiGet().

4697  {
4698  do {
4699  // Empty Key Set
4700  std::vector<Slice> keys;
4701  std::vector<std::string> values;
4702  std::vector<Status> s = db_->MultiGet(ReadOptions(),keys,&values);
4703  ASSERT_EQ((int)s.size(),0);
4704 
4705  // Empty Database, Empty Key Set
4706  DestroyAndReopen();
4707  s = db_->MultiGet(ReadOptions(), keys, &values);
4708  ASSERT_EQ((int)s.size(),0);
4709 
4710  // Empty Database, Search for Keys
4711  keys.resize(2);
4712  keys[0] = "a";
4713  keys[1] = "b";
4714  s = db_->MultiGet(ReadOptions(),keys,&values);
4715  ASSERT_EQ((int)s.size(), 2);
4716  ASSERT_TRUE(s[0].IsNotFound() && s[1].IsNotFound());
4717  } while (ChangeCompactOptions());
4718 }

Here is the call graph for this function:

rocksdb::TEST ( DBTest  ,
PrefixScan   
)

Definition at line 4770 of file db_test.cc.

References ASSERT_EQ, ASSERT_OK, rocksdb::Options::create_if_missing, db_, rocksdb::Options::disable_auto_compactions, rocksdb::Options::disable_seek_compaction, rocksdb::Options::env, env_, rocksdb::Options::filter_policy, rocksdb::Iterator::key(), rocksdb::Options::max_background_compactions, rocksdb::Options::memtable_factory, NewBloomFilterPolicy(), NewFixedPrefixTransform(), NewHashSkipListRepFactory(), rocksdb::DBImpl::NewIterator(), rocksdb::Iterator::Next(), rocksdb::Options::no_block_cache, prefix, rocksdb::ReadOptions::prefix, rocksdb::Options::prefix_extractor, PrefixScanInit(), rocksdb::Iterator::SeekToFirst(), rocksdb::Slice::starts_with(), rocksdb::Iterator::status(), rocksdb::Iterator::Valid(), and rocksdb::Options::whole_key_filtering.

4770  {
4771  for (int it = 0; it < 2; ++it) {
4772  ReadOptions ro = ReadOptions();
4773  int count;
4774  Slice prefix;
4775  Slice key;
4776  char buf[100];
4777  Iterator* iter;
4778  snprintf(buf, sizeof(buf), "03______:");
4779  prefix = Slice(buf, 8);
4780  key = Slice(buf, 9);
4781  auto prefix_extractor = NewFixedPrefixTransform(8);
4782  // db configs
4783  env_->count_random_reads_ = true;
4784  Options options = CurrentOptions();
4785  options.env = env_;
4786  options.no_block_cache = true;
4787  options.filter_policy = NewBloomFilterPolicy(10);
4788  options.prefix_extractor = prefix_extractor;
4789  options.whole_key_filtering = false;
4790  options.disable_auto_compactions = true;
4791  options.max_background_compactions = 2;
4792  options.create_if_missing = true;
4793  options.disable_seek_compaction = true;
4794  if (it == 0) {
4795  options.memtable_factory.reset(NewHashSkipListRepFactory(
4796  prefix_extractor));
4797  } else {
4798  options.memtable_factory = std::make_shared<PrefixHashRepFactory>(
4799  prefix_extractor);
4800  }
4801 
4802  // prefix specified, with blooms: 2 RAND I/Os
4803  // SeekToFirst
4804  DestroyAndReopen(&options);
4805  PrefixScanInit(this);
4806  count = 0;
4807  env_->random_read_counter_.Reset();
4808  ro.prefix = &prefix;
4809  iter = db_->NewIterator(ro);
4810  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
4811  assert(iter->key().starts_with(prefix));
4812  count++;
4813  }
4814  ASSERT_OK(iter->status());
4815  delete iter;
4816  ASSERT_EQ(count, 2);
4817  ASSERT_EQ(env_->random_read_counter_.Read(), 2);
4818 
4819  // prefix specified, with blooms: 2 RAND I/Os
4820  // Seek
4821  DestroyAndReopen(&options);
4822  PrefixScanInit(this);
4823  count = 0;
4824  env_->random_read_counter_.Reset();
4825  ro.prefix = &prefix;
4826  iter = db_->NewIterator(ro);
4827  for (iter->Seek(key); iter->Valid(); iter->Next()) {
4828  assert(iter->key().starts_with(prefix));
4829  count++;
4830  }
4831  ASSERT_OK(iter->status());
4832  delete iter;
4833  ASSERT_EQ(count, 2);
4834  ASSERT_EQ(env_->random_read_counter_.Read(), 2);
4835 
4836  // no prefix specified: 11 RAND I/Os
4837  DestroyAndReopen(&options);
4838  PrefixScanInit(this);
4839  count = 0;
4840  env_->random_read_counter_.Reset();
4841  iter = db_->NewIterator(ReadOptions());
4842  for (iter->Seek(prefix); iter->Valid(); iter->Next()) {
4843  if (! iter->key().starts_with(prefix)) {
4844  break;
4845  }
4846  count++;
4847  }
4848  ASSERT_OK(iter->status());
4849  delete iter;
4850  ASSERT_EQ(count, 2);
4851  ASSERT_EQ(env_->random_read_counter_.Read(), 11);
4852  Close();
4853  delete options.filter_policy;
4854  }
4855 }

Here is the call graph for this function:

static void rocksdb::TestEncodeDecode ( const VersionEdit &  edit)
static

Definition at line 15 of file version_edit_test.cc.

References ASSERT_EQ, ASSERT_TRUE, rocksdb::VersionEdit::DecodeFrom(), rocksdb::VersionEdit::EncodeTo(), rocksdb::Status::ok(), and rocksdb::Status::ToString().

Referenced by TEST().

15  {
16  std::string encoded, encoded2;
17  edit.EncodeTo(&encoded);
18  VersionEdit parsed(7);
19  Status s = parsed.DecodeFrom(encoded);
20  ASSERT_TRUE(s.ok()) << s.ToString();
21  parsed.EncodeTo(&encoded2);
22  ASSERT_EQ(encoded, encoded2);
23 }

Here is the call graph for this function:

Here is the caller graph for this function:

static void rocksdb::TestKey ( const std::string &  key,
uint64_t  seq,
ValueType  vt 
)
static

Definition at line 36 of file dbformat_test.cc.

References ASSERT_EQ, ASSERT_TRUE, IKey(), Json::in(), kTypeValue, ParseInternalKey(), rocksdb::ParsedInternalKey::sequence, rocksdb::Slice::ToString(), rocksdb::ParsedInternalKey::type, and rocksdb::ParsedInternalKey::user_key.

Referenced by rocksdb::TestKeyComparator::Compare(), and TEST().

38  {
39  std::string encoded = IKey(key, seq, vt);
40 
41  Slice in(encoded);
42  ParsedInternalKey decoded("", 0, kTypeValue);
43 
44  ASSERT_TRUE(ParseInternalKey(in, &decoded));
45  ASSERT_EQ(key, decoded.user_key.ToString());
46  ASSERT_EQ(seq, decoded.sequence);
47  ASSERT_EQ(vt, decoded.type);
48 
49  ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
50 }

Here is the call graph for this function:

Here is the caller graph for this function:

Slice rocksdb::TestKeyToSlice ( const TestKey &  test_key)
inline

Definition at line 39 of file prefix_test.cc.

Referenced by TEST().

39  {
40  return Slice((const char*)&test_key, sizeof(test_key));
41 }

Here is the caller graph for this function:

static void rocksdb::ThreadBody ( void *  arg)
static

Definition at line 83 of file env_test.cc.

References arg, rocksdb::port::Mutex::Lock(), rocksdb::State::mu, rocksdb::State::num_running, rocksdb::port::Mutex::Unlock(), and rocksdb::State::val.

Referenced by TEST().

83  {
84  State* s = reinterpret_cast<State*>(arg);
85  s->mu.Lock();
86  s->val += 1;
87  s->num_running -= 1;
88  s->mu.Unlock();
89 }

Here is the call graph for this function:

Here is the caller graph for this function:

static uint64_t rocksdb::TotalFileSize ( const std::vector< FileMetaData * > &  files)
static

Definition at line 31 of file version_set.cc.

Referenced by rocksdb::VersionSet::Finalize(), rocksdb::Compaction::IsTrivialMove(), rocksdb::VersionSet::MaxNextLevelOverlappingBytes(), rocksdb::VersionSet::NumLevelBytes(), rocksdb::VersionSet::PickCompactionUniversalReadAmp(), rocksdb::Version::PickLevelForMemTableOutput(), and rocksdb::VersionSet::SetupOtherInputs().

31  {
32  uint64_t sum = 0;
33  for (size_t i = 0; i < files.size() && files[i]; i++) {
34  sum += files[i]->file_size;
35  }
36  return sum;
37 }

Here is the caller graph for this function:

Status rocksdb::UncompressBlockContents ( const char *  data,
size_t  n,
BlockContents *  result 
)

Definition at line 151 of file format.cc.

References rocksdb::port::BZip2_Uncompress(), rocksdb::BlockContents::cachable, rocksdb::BlockContents::compression_type, rocksdb::Status::Corruption(), rocksdb::BlockContents::data, rocksdb::BlockContents::heap_allocated, kBZip2Compression, kNoCompression, kSnappyCompression, kZlibCompression, rocksdb::Status::OK(), rocksdb::port::Snappy_GetUncompressedLength(), rocksdb::port::Snappy_Uncompress(), and rocksdb::port::Zlib_Uncompress().

Referenced by rocksdb::BlockBasedTable::BlockReader(), and ReadBlockContents().

152  {
153  char* ubuf = nullptr;
154  int decompress_size = 0;
155  assert(data[n] != kNoCompression);
156  switch (data[n]) {
157  case kSnappyCompression: {
158  size_t ulength = 0;
159  static char snappy_corrupt_msg[] =
160  "Snappy not supported or corrupted Snappy compressed block contents";
161  if (!port::Snappy_GetUncompressedLength(data, n, &ulength)) {
162  return Status::Corruption(snappy_corrupt_msg);
163  }
164  ubuf = new char[ulength];
165  if (!port::Snappy_Uncompress(data, n, ubuf)) {
166  delete[] ubuf;
167  return Status::Corruption(snappy_corrupt_msg);
168  }
169  result->data = Slice(ubuf, ulength);
170  result->heap_allocated = true;
171  result->cachable = true;
172  break;
173  }
174  case kZlibCompression:
175  ubuf = port::Zlib_Uncompress(data, n, &decompress_size);
176  static char zlib_corrupt_msg[] =
177  "Zlib not supported or corrupted Zlib compressed block contents";
178  if (!ubuf) {
179  return Status::Corruption(zlib_corrupt_msg);
180  }
181  result->data = Slice(ubuf, decompress_size);
182  result->heap_allocated = true;
183  result->cachable = true;
184  break;
185  case kBZip2Compression:
186  ubuf = port::BZip2_Uncompress(data, n, &decompress_size);
187  static char bzip2_corrupt_msg[] =
188  "Bzip2 not supported or corrupted Bzip2 compressed block contents";
189  if (!ubuf) {
190  return Status::Corruption(bzip2_corrupt_msg);
191  }
192  result->data = Slice(ubuf, decompress_size);
193  result->heap_allocated = true;
194  result->cachable = true;
195  break;
196  default:
197  return Status::Corruption("bad block type");
198  }
199  result->compression_type = kNoCompression; // not compressed any more
200  return Status::OK();
201 }

Here is the call graph for this function:

Here is the caller graph for this function:

static void rocksdb::UnrefEntry ( void *  arg1,
void *  arg2 
)
static

Definition at line 26 of file table_cache.cc.

References rocksdb::Cache::Release().

Referenced by rocksdb::TableCache::NewIterator().

26  {
27  Cache* cache = reinterpret_cast<Cache*>(arg1);
28  Cache::Handle* h = reinterpret_cast<Cache::Handle*>(arg2);
29  cache->Release(h);
30 }

Here is the call graph for this function:

Here is the caller graph for this function:

void rocksdb::UpdateInternalKey ( char *  internal_key,
const size_t  internal_key_size,
uint64_t  seq,
ValueType  t 
)
inline

Definition at line 171 of file dbformat.h.

References EncodeFixed64().

Referenced by rocksdb::DBImpl::DoCompactionWork(), and rocksdb::MergeHelper::MergeUntil().

173  {
174  assert(internal_key_size >= 8);
175  char* seqtype = internal_key + internal_key_size - 8;
176  uint64_t newval = (seq << 8) | t;
177  EncodeFixed64(seqtype, newval);
178 }

Here is the call graph for this function:

Here is the caller graph for this function:

int rocksdb::VarintLength ( uint64_t  v)

Definition at line 122 of file coding.cc.

Referenced by rocksdb::MemTable::Add(), rocksdb::BlockBuilder::EstimateSizeAfterKV(), TEST(), and rocksdb::MemTable::Update().

122  {
123  int len = 1;
124  while (v >= 128) {
125  v >>= 7;
126  len++;
127  }
128  return len;
129 }

Here is the caller graph for this function:

rocksdb::verbose_ ( false  )

Referenced by path_().

Here is the caller graph for this function:

Status rocksdb::WriteStringToFile ( Env *  env,
const Slice &  data,
const std::string &  fname 
)

Definition at line 82 of file env.cc.

References DoWriteStringToFile().

Referenced by rocksdb::CorruptionTest::Corrupt().

83  {
84  return DoWriteStringToFile(env, data, fname, false);
85 }

Here is the call graph for this function:

Here is the caller graph for this function:

Status rocksdb::WriteStringToFileSync ( Env *  env,
const Slice &  data,
const std::string &  fname 
)

Definition at line 87 of file env.cc.

References DoWriteStringToFile().

Referenced by SetCurrentFile(), and SetIdentityFile().

88  {
89  return DoWriteStringToFile(env, data, fname, true);
90 }

Here is the call graph for this function:

Here is the caller graph for this function:

static bool rocksdb::ZlibCompressionSupported ( const CompressionOptions &  options)
static

Definition at line 40 of file db_test.cc.

References rocksdb::Slice::data(), Json::in(), rocksdb::Slice::size(), and rocksdb::port::Zlib_Compress().

Referenced by GenerateArgList(), MinLevelToCompress(), and TEST().

40  {
41  std::string out;
42  Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
43  return port::Zlib_Compress(options, in.data(), in.size(), &out);
44 }

Here is the call graph for this function:

Here is the caller graph for this function:

static bool rocksdb::ZlibCompressionSupported ( )
static

Definition at line 460 of file table_test.cc.

References rocksdb::Slice::data(), Json::in(), rocksdb::Slice::size(), and rocksdb::port::Zlib_Compress().

460  {
461  std::string out;
462  Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
463  return port::Zlib_Compress(Options().compression_opts,
464  in.data(), in.size(),
465  &out);
466 }

Here is the call graph for this function:

Variable Documentation

const char* const rocksdb::access_hints[]
static
Initial value:
= {
"NONE", "NORMAL", "SEQUENTIAL", "WILLNEED"
}

Definition at line 108 of file options.cc.

Referenced by rocksdb::Options::Dump().

const std::string rocksdb::ARCHIVAL_DIR = "archive"
static

Definition at line 41 of file filename.h.

Referenced by ArchivalDirectory(), ArchivedLogFileName(), and ParseFileName().

const Comparator* rocksdb::bytewise
static

Definition at line 75 of file comparator.cc.

Referenced by BytewiseComparator(), and InitModule().

int rocksdb::cfilter_count
static

Definition at line 2536 of file db_test.cc.

Referenced by rocksdb::KeepFilter::Filter(), rocksdb::DeleteFilter::Filter(), and TEST().

rocksdb::else
Initial value:
{
key_ = params.at(0)

Definition at line 1362 of file ldb_cmd.cc.

const std::vector<std::pair<Histograms, std::string> > rocksdb::HistogramsNameMap
Initial value:
= {
{ DB_GET, "rocksdb.db.get.micros" },
{ DB_WRITE, "rocksdb.db.write.micros" },
{ COMPACTION_TIME, "rocksdb.compaction.times.micros" },
{ TABLE_SYNC_MICROS, "rocksdb.table.sync.micros" },
{ COMPACTION_OUTFILE_SYNC_MICROS, "rocksdb.compaction.outfile.sync.micros" },
{ WAL_FILE_SYNC_MICROS, "rocksdb.wal.file.sync.micros" },
{ MANIFEST_FILE_SYNC_MICROS, "rocksdb.manifest.file.sync.micros" },
{ TABLE_OPEN_IO_MICROS, "rocksdb.table.open.io.micros" },
{ DB_MULTIGET, "rocksdb.db.multiget.micros" },
{ READ_BLOCK_COMPACTION_MICROS, "rocksdb.read.block.compaction.micros" },
{ READ_BLOCK_GET_MICROS, "rocksdb.read.block.get.micros" },
{ WRITE_RAW_BLOCK_MICROS, "rocksdb.write.raw.block.micros" },
{ STALL_L0_SLOWDOWN_COUNT, "rocksdb.l0.slowdown.count"},
{ STALL_MEMTABLE_COMPACTION_COUNT, "rocksdb.memtable.compaction.count"},
{ STALL_L0_NUM_FILES_COUNT, "rocksdb.num.files.stall.count"},
{ HARD_RATE_LIMIT_DELAY_COUNT, "rocksdb.hard.rate.limit.delay.count"},
{ SOFT_RATE_LIMIT_DELAY_COUNT, "rocksdb.soft.rate.limit.delay.count"},
{ NUM_FILES_IN_SINGLE_COMPACTION, "rocksdb.numfiles.in.singlecompaction" },
}

Definition at line 189 of file statistics.h.

Referenced by RecordTick(), SetTickerCount(), and rocksdb::Statistics::ToString().

const size_t rocksdb::kBlockTrailerSize = 5
static
const std::string rocksdb::kDbName = "/tmp/mergetestdb"
const int rocksdb::kDebugLogChunkSize = 128 * 1024

Definition at line 28 of file posix_logger.h.

Referenced by rocksdb::PosixLogger::Logv().

const int rocksdb::kDelayMicros = 100000
static

Definition at line 22 of file env_test.cc.

Referenced by TEST().

const size_t rocksdb::kFilterBase = 1 << kFilterBaseLg
static

Definition at line 22 of file filter_block.cc.

Referenced by rocksdb::FilterBlockBuilder::StartBlock().

const size_t rocksdb::kFilterBaseLg = 11
static

Definition at line 21 of file filter_block.cc.

Referenced by rocksdb::FilterBlockBuilder::Finish().

const int rocksdb::kMajorVersion = 2
static
const size_t rocksdb::kMaxCacheKeyPrefixSize = kMaxVarint64Length*3+1

Definition at line 35 of file block_based_table_reader.cc.

const unsigned int rocksdb::kMaxVarint32Length = 5

Definition at line 24 of file coding.h.

const unsigned int rocksdb::kMaxVarint64Length = 10
const int rocksdb::kMinorVersion = 0
static
const uint64_t rocksdb::kTableMagicNumber = 0xdb4775248b80fb57ull
static

Definition at line 85 of file format.h.

Referenced by rocksdb::Footer::DecodeFrom(), and rocksdb::Footer::EncodeTo().

const int rocksdb::kValueSize = 1000
static

Definition at line 30 of file corruption_test.cc.

Referenced by TEST(), and rocksdb::CorruptionTest::Value().

const int rocksdb::kVerbose = 1
static
std::string rocksdb::NEW_VALUE = "NewValue"
static

Definition at line 2537 of file db_test.cc.

Referenced by rocksdb::ChangeFilter::Filter(), and TEST().

port::OnceType rocksdb::once = LEVELDB_ONCE_INIT
static

Definition at line 74 of file comparator.cc.

Referenced by BytewiseComparator(), sqlite3VdbeExec(), and whereLoopAddOr().

enum PerfLevel rocksdb::perf_level = kEnableCount

Definition at line 11 of file perf_context.cc.

Referenced by BumpPerfCount(), BumpPerfTime(), SetPerfLevel(), and StartPerfTimer().

ReverseKeyComparator rocksdb::reverse_key_comparator
static

Definition at line 74 of file table_test.cc.

Referenced by Increment(), and rocksdb::Harness::Init().

const std::vector<std::pair<Tickers, std::string> > rocksdb::TickersNameMap
Initial value:
= {
{ BLOCK_CACHE_MISS, "rocksdb.block.cache.miss" },
{ BLOCK_CACHE_HIT, "rocksdb.block.cache.hit" },
{ BLOCK_CACHE_ADD, "rocksdb.block.cache.add" },
{ BLOCK_CACHE_INDEX_MISS, "rocksdb.block.cache.index.miss" },
{ BLOCK_CACHE_INDEX_HIT, "rocksdb.block.cache.index.hit" },
{ BLOCK_CACHE_FILTER_MISS, "rocksdb.block.cache.filter.miss" },
{ BLOCK_CACHE_FILTER_HIT, "rocksdb.block.cache.filter.hit" },
{ BLOCK_CACHE_DATA_MISS, "rocksdb.block.cache.data.miss" },
{ BLOCK_CACHE_DATA_HIT, "rocksdb.block.cache.data.hit" },
{ BLOOM_FILTER_USEFUL, "rocksdb.bloom.filter.useful" },
{ COMPACTION_KEY_DROP_NEWER_ENTRY, "rocksdb.compaction.key.drop.new" },
{ COMPACTION_KEY_DROP_OBSOLETE, "rocksdb.compaction.key.drop.obsolete" },
{ COMPACTION_KEY_DROP_USER, "rocksdb.compaction.key.drop.user" },
{ NUMBER_KEYS_WRITTEN, "rocksdb.number.keys.written" },
{ NUMBER_KEYS_READ, "rocksdb.number.keys.read" },
{ NUMBER_KEYS_UPDATED, "rocksdb.number.keys.updated" },
{ BYTES_WRITTEN, "rocksdb.bytes.written" },
{ BYTES_READ, "rocksdb.bytes.read" },
{ NO_FILE_CLOSES, "rocksdb.no.file.closes" },
{ NO_FILE_OPENS, "rocksdb.no.file.opens" },
{ NO_FILE_ERRORS, "rocksdb.no.file.errors" },
{ STALL_L0_SLOWDOWN_MICROS, "rocksdb.l0.slowdown.micros" },
{ STALL_MEMTABLE_COMPACTION_MICROS, "rocksdb.memtable.compaction.micros" },
{ STALL_L0_NUM_FILES_MICROS, "rocksdb.l0.num.files.stall.micros" },
{ RATE_LIMIT_DELAY_MILLIS, "rocksdb.rate.limit.delay.millis" },
{ NO_ITERATORS, "rocksdb.num.iterators" },
{ NUMBER_MULTIGET_CALLS, "rocksdb.number.multiget.get" },
{ NUMBER_MULTIGET_KEYS_READ, "rocksdb.number.multiget.keys.read" },
{ NUMBER_MULTIGET_BYTES_READ, "rocksdb.number.multiget.bytes.read" },
{ NUMBER_FILTERED_DELETES, "rocksdb.number.deletes.filtered" },
{ NUMBER_MERGE_FAILURES, "rocksdb.number.merge.failures" },
{ SEQUENCE_NUMBER, "rocksdb.sequence.number" },
{ BLOOM_FILTER_PREFIX_CHECKED, "rocksdb.bloom.filter.prefix.checked" },
{ BLOOM_FILTER_PREFIX_USEFUL, "rocksdb.bloom.filter.prefix.useful" },
{ NUMBER_OF_RESEEKS_IN_ITERATION, "rocksdb.number.reseeks.iteration" },
{ GET_UPDATES_SINCE_CALLS, "rocksdb.getupdatessince.calls" },
{ BLOCK_CACHE_COMPRESSED_MISS, "rocksdb.block.cachecompressed.miss" },
{ BLOCK_CACHE_COMPRESSED_HIT, "rocksdb.block.cachecompressed.hit" }
}

Definition at line 117 of file statistics.h.

Referenced by RecordTick(), SetTickerCount(), and rocksdb::Statistics::ToString().